diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..851d7a07
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+ - package-ecosystem: "gomod"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/.github/workflows/build-and-push.yml b/.github/workflows/build-and-push.yml
new file mode 100644
index 00000000..8226039f
--- /dev/null
+++ b/.github/workflows/build-and-push.yml
@@ -0,0 +1,61 @@
+name: "Build and push GARM images"
+on:
+ workflow_call:
+ inputs:
+ push_to_project:
+ description: "Project to build images for"
+ required: false
+ type: string
+ default: "ghcr.io/cloudbase"
+ ref:
+ description: "Ref to build"
+ required: false
+ type: string
+ default: "main"
+
+permissions:
+ contents: read
+
+jobs:
+ images:
+ permissions:
+ packages: write
+ name: "Build GARM images"
+ runs-on: ubuntu-latest
+ steps:
+ - name: "Checkout"
+ uses: actions/checkout@v4
+ with:
+ path: src/github.com/cloudbase/garm
+ fetch-depth: 0
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build and push image
+ env:
+ IMAGE_REGISTRY: ${{ inputs.push_to_project }}
+ GH_REF: ${{ inputs.ref }}
+ working-directory: src/github.com/cloudbase/garm
+ run: |
+ if [ "$GH_REF" == "main" ]; then
+ IMAGE_TAG="nightly"
+ else
+ IMAGE_TAG=$(git describe --tags --match='v[0-9]*' --always ${GH_REF})
+ fi
+ docker buildx build \
+ --provenance=false \
+ --platform linux/amd64,linux/arm64 \
+ --label "org.opencontainers.image.source=https://github.com/cloudbase/garm/tree/${GH_REF}" \
+ --label "org.opencontainers.image.description=GARM ${GH_REF}" \
+            --label "org.opencontainers.image.licenses=Apache-2.0" \
+ --build-arg="GARM_REF=${GH_REF}" \
+ -t ${IMAGE_REGISTRY}/garm:"${IMAGE_TAG}" \
+ --push .
diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml
index 148c60de..bde4f0f0 100644
--- a/.github/workflows/go-tests.yml
+++ b/.github/workflows/go-tests.yml
@@ -4,9 +4,11 @@ on:
push:
branches:
- main
+ - 'release/**'
pull_request:
branches:
- main
+ - 'release/**'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref_name }}
@@ -17,23 +19,22 @@ permissions: {}
jobs:
linters:
name: Linters
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Install dependencies
run: |
sudo apt-get update
- sudo apt-get install -y libbtrfs-dev build-essential
+ sudo apt-get install -y libbtrfs-dev build-essential apg jq
- - uses: actions/setup-go@v3
- with:
- go-version: 'stable'
- uses: actions/checkout@v3
- - uses: golangci/golangci-lint-action@v3
+ - uses: actions/setup-go@v5
with:
- skip-cache: true
- args: --timeout=8m --build-tags testing
+ go-version-file: go.mod
+
+ - name: make lint
+ run: make golangci-lint && GOLANGCI_LINT_EXTRA_ARGS="--timeout=8m --build-tags=testing,integration" make lint
- name: Verify go vendor, go modules and gofmt
run: |
sudo apt-get install -y jq
@@ -43,15 +44,39 @@ jobs:
runs-on: ubuntu-latest
needs: [linters]
steps:
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y libbtrfs-dev build-essential apg jq default-jre
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version: '>=v24.5.0'
+
+ - name: Set up openapi-generator-cli
+ run: |
+ mkdir -p $HOME/openapi-generator
+ cd $HOME/openapi-generator
+ npm install @openapitools/openapi-generator-cli
+ echo "$HOME/openapi-generator/node_modules/.bin" >> $GITHUB_PATH
+
- name: Checkout
uses: actions/checkout@v3
- name: Setup Golang
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
go-version-file: go.mod
- run: go version
+ - name: Run go generate
+ run: |
+ GOTOOLCHAIN=go1.24.6 make generate
+
- name: Run GARM Go Tests
run: make go-test
+
+ - name: Run web UI tests
+ run: |
+ make webui-test
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
new file mode 100644
index 00000000..04072b20
--- /dev/null
+++ b/.github/workflows/integration-tests.yml
@@ -0,0 +1,122 @@
+name: Integration Tests
+on:
+ workflow_dispatch: {}
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ integration-tests:
+ runs-on: ubuntu-noble-garm
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Golang
+        uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+
+ - name: Setup LXD
+ uses: canonical/setup-lxd@main
+ with:
+ channel: latest/stable
+
+ - name: Install dependencies
+ run: |
+ sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
+ sudo apt-get -qq update && sudo apt-get -qq install -y apg coreutils make jq build-essential libsqlite3-dev libsqlite3-0
+
+ - name: Set up tunnel
+ shell: bash
+ run: |
+ mkdir -p /home/runner/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > /home/runner/.ssh/ssh_key
+ sudo chown -R runner:runner /home/runner/.ssh
+ sudo chmod 500 /home/runner/.ssh
+ sudo chmod 400 /home/runner/.ssh/ssh_key
+
+ SUBDOMAIN=$(apg -a 0 -M l -m 12 -n 1)
+ echo "::add-mask::$SUBDOMAIN"
+
+ BASE_URL="${{ secrets.TUNNEL_BASE_URL }}"
+ GARM_BASE_URL="https://$SUBDOMAIN.$BASE_URL"
+ echo "::add-mask::$GARM_BASE_URL"
+
+ echo "GARM_BASE_URL=$GARM_BASE_URL" >> $GITHUB_ENV
+
+          REPO_WEBHOOK_SECRET=$(apg -a 0 -M l -m 32 -n 1); ORG_WEBHOOK_SECRET=$(apg -a 0 -M l -m 32 -n 1)
+ echo "REPO_WEBHOOK_SECRET=$REPO_WEBHOOK_SECRET" >> $GITHUB_ENV
+ echo "ORG_WEBHOOK_SECRET=$ORG_WEBHOOK_SECRET" >> $GITHUB_ENV
+ echo "GARM_CHECKOUT_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+
+ - name: Create logs directory
+ if: always()
+ run: sudo mkdir -p /artifacts-logs && sudo chmod 777 /artifacts-logs
+
+ - name: Run integration tests
+ run: |
+ set -o pipefail
+ set -o errexit
+ make integration 2>&1
+ env:
+ ORG_NAME: gsamfira
+ REPO_NAME: garm-testing
+ CREDENTIALS_NAME: test-garm-creds
+ WORKFLOW_FILE_NAME: test.yml
+ GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
+ LXD_REMOTE_SERVER: ${{ secrets.LXD_REMOTE_SERVER }}
+
+ - name: Show GARM logs
+ if: always()
+ run: |
+ sudo systemctl status garm@runner || true
+ sudo journalctl --no-pager 2>&1 > /artifacts-logs/system.log
+ sudo journalctl -u garm@runner --no-pager 2>&1 > /artifacts-logs/garm.log
+
+ - name: Upload GARM and e2e logs
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: garm-logs
+ path: /artifacts-logs
+
+ - name: Cleanup orphan GARM resources via GitHub API
+ if: always()
+ run: |
+ set -o pipefail
+ set -o errexit
+
+ sudo systemctl stop garm@runner || true
+ go run ./test/integration/gh_cleanup/main.go || true
+ env:
+ ORG_NAME: gsamfira
+ REPO_NAME: garm-testing
+ GH_TOKEN: ${{ secrets.GH_OAUTH_TOKEN }}
diff --git a/.github/workflows/trigger-manual.yml b/.github/workflows/trigger-manual.yml
new file mode 100644
index 00000000..faf166d4
--- /dev/null
+++ b/.github/workflows/trigger-manual.yml
@@ -0,0 +1,19 @@
+name: Manual build of GARM images
+on:
+ workflow_dispatch:
+ inputs:
+ push_to_project:
+ description: "Project to build images for"
+ required: true
+ default: "ghcr.io/cloudbase"
+ ref:
+ description: "Ref to build"
+ required: true
+ default: "main"
+
+jobs:
+ call-build-and-push:
+ uses: ./.github/workflows/build-and-push.yml
+ with:
+ push_to_project: ${{ inputs.push_to_project }}
+      ref: ${{ inputs.ref }}
diff --git a/.github/workflows/trigger-nightly.yml b/.github/workflows/trigger-nightly.yml
new file mode 100644
index 00000000..e0b83856
--- /dev/null
+++ b/.github/workflows/trigger-nightly.yml
@@ -0,0 +1,10 @@
+name: Nightly build of GARM images
+on:
+ schedule:
+ - cron: "0 2 * * *"
+
+jobs:
+ call-build-and-push:
+ uses: ./.github/workflows/build-and-push.yml
+ with:
+ ref: "main"
diff --git a/.gitignore b/.gitignore
index c4a5d98b..54c931c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
*.dll
*.so
*.dylib
+*.DS_Store
# Test binary, built with `go test -c`
*.test
@@ -16,3 +17,11 @@ bin/
# vendor/
.vscode
cmd/temp
+build/
+release/
+node_modules/
+.svelte-kit/
+debug.html
+git_push.sh
+webapp/src/lib/api/generated/docs
+.env
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..8dee07f5
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: MIT
+linters:
+ disable-all: true
+ fast: false
+ enable:
+ - gci
+ - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - gofumpt
+ - goimports
+ - godox
+ - govet
+ - gosec
+ - gosimple
+ - importas
+ - ineffassign
+ - loggercheck
+ - misspell
+ - nakedret
+ - nilerr
+ - predeclared
+ - promlinter
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ - wastedassign
+ - whitespace
+
+linters-settings:
+ gci:
+ sections:
+ - standard
+ - default
+ - prefix(github.com/cloudbase/garm)
+
+ goimports:
+ local-prefixes: github.com/cloudbase/garm
+
+ gosec:
+ excludes:
+ - G115
diff --git a/.mockery.yaml b/.mockery.yaml
new file mode 100644
index 00000000..b7858821
--- /dev/null
+++ b/.mockery.yaml
@@ -0,0 +1,27 @@
+with-expecter: true
+dir: "mocks"
+mockname: "{{ .InterfaceName }}"
+outpkg: "mocks"
+filename: "{{ .InterfaceName }}.go"
+# V3 compatibility settings
+resolve-type-alias: false
+disable-version-string: true
+issue-845-fix: true
+packages:
+ # Database store interfaces
+ github.com/cloudbase/garm/database/common:
+ interfaces:
+ Store:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+ # Runner interfaces
+ github.com/cloudbase/garm/runner:
+ interfaces:
+ PoolManagerController:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+ # Runner common interfaces (generate all interfaces in this package)
+ github.com/cloudbase/garm/runner/common:
+ config:
+ dir: "{{ .InterfaceDir }}/mocks"
+    all: true
diff --git a/Dockerfile b/Dockerfile
index 860c6ce6..81033292 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,11 +1,77 @@
-FROM docker.io/golang:alpine
+FROM docker.io/golang:alpine AS builder
+ARG GARM_REF
-WORKDIR /root
-USER root
+LABEL stage=builder
-RUN apk add musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers mingw-w64-gcc
+RUN apk add --no-cache musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers upx curl jq
+RUN git config --global --add safe.directory /build && git config --global --add advice.detachedHead false
+RUN echo ${GARM_REF}
-ADD ./scripts/build-static.sh /build-static.sh
-RUN chmod +x /build-static.sh
+ADD . /build/garm
-CMD ["/bin/sh"]
+RUN git -C /build/garm checkout ${GARM_REF}
+RUN cd /build/garm \
+ && go build -o /bin/garm \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" \
+ /build/garm/cmd/garm && upx /bin/garm
+RUN cd /build/garm/cmd/garm-cli \
+ && go build -o /bin/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-linkmode external -extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" \
+ . && upx /bin/garm-cli
+RUN set -ex; \
+ mkdir -p /opt/garm/providers.d; \
+ for repo in \
+ cloudbase/garm-provider-azure \
+ cloudbase/garm-provider-openstack \
+ cloudbase/garm-provider-lxd \
+ cloudbase/garm-provider-incus \
+ cloudbase/garm-provider-aws \
+ cloudbase/garm-provider-gcp \
+ cloudbase/garm-provider-equinix \
+ flatcar/garm-provider-linode \
+ mercedes-benz/garm-provider-k8s; \
+ do \
+ export PROVIDER_NAME="$(basename $repo)"; \
+ export PROVIDER_SUBDIR=""; \
+        if [ "$GARM_REF" = "main" ]; then \
+ export PROVIDER_REF="main"; \
+ else \
+ export PROVIDER_REF="$(curl -s -L https://api.github.com/repos/$repo/releases/latest | jq -r '.tag_name')"; \
+ fi; \
+ git clone --branch "$PROVIDER_REF" "https://github.com/$repo" "/build/$PROVIDER_NAME"; \
+ case $PROVIDER_NAME in \
+ "garm-provider-k8s") \
+ export PROVIDER_SUBDIR="cmd/garm-provider-k8s"; \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w"; \
+ ;; \
+ "garm-provider-linode") \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w"; \
+ ;; \
+ *) \
+ export PROVIDER_VERSION=$(git -C /build/$PROVIDER_NAME describe --tags --match='v[0-9]*' --dirty --always); \
+ export PROVIDER_LDFLAGS="-linkmode external -extldflags \"-static\" -s -w -X main.Version=$PROVIDER_VERSION"; \
+ ;; \
+ esac; \
+ cd "/build/$PROVIDER_NAME/$PROVIDER_SUBDIR" \
+ && go build -ldflags="$PROVIDER_LDFLAGS" -o /opt/garm/providers.d/$PROVIDER_NAME . \
+ && upx /opt/garm/providers.d/$PROVIDER_NAME; \
+ done
+
+FROM busybox
+
+COPY --from=builder /bin/garm /bin/garm
+COPY --from=builder /bin/garm-cli /bin/garm-cli
+COPY --from=builder /opt/garm/providers.d/garm-provider-openstack /opt/garm/providers.d/garm-provider-openstack
+COPY --from=builder /opt/garm/providers.d/garm-provider-lxd /opt/garm/providers.d/garm-provider-lxd
+COPY --from=builder /opt/garm/providers.d/garm-provider-incus /opt/garm/providers.d/garm-provider-incus
+COPY --from=builder /opt/garm/providers.d/garm-provider-azure /opt/garm/providers.d/garm-provider-azure
+COPY --from=builder /opt/garm/providers.d/garm-provider-aws /opt/garm/providers.d/garm-provider-aws
+COPY --from=builder /opt/garm/providers.d/garm-provider-gcp /opt/garm/providers.d/garm-provider-gcp
+COPY --from=builder /opt/garm/providers.d/garm-provider-equinix /opt/garm/providers.d/garm-provider-equinix
+
+COPY --from=builder /opt/garm/providers.d/garm-provider-k8s /opt/garm/providers.d/garm-provider-k8s
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+
+ENTRYPOINT ["/bin/garm", "-config", "/etc/garm/config.toml"]
diff --git a/Dockerfile.build-static b/Dockerfile.build-static
new file mode 100644
index 00000000..2ed27168
--- /dev/null
+++ b/Dockerfile.build-static
@@ -0,0 +1,17 @@
+FROM docker.io/golang:alpine
+
+WORKDIR /root
+USER root
+
+RUN apk add musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers mingw-w64-gcc
+
+RUN wget http://musl.cc/aarch64-linux-musl-cross.tgz -O /tmp/aarch64-linux-musl-cross.tgz && \
+ tar --strip-components=1 -C /usr/local -xzf /tmp/aarch64-linux-musl-cross.tgz && \
+ rm /tmp/aarch64-linux-musl-cross.tgz
+
+ADD ./scripts/build-static.sh /build-static.sh
+RUN chmod +x /build-static.sh
+
+ADD . /build/garm
+
+CMD ["/bin/sh"]
diff --git a/Makefile b/Makefile
index fbd2dbf2..714d2465 100644
--- a/Makefile
+++ b/Makefile
@@ -1,53 +1,139 @@
-SHELL := bash
+SHELL := /bin/bash
+export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit
+.ONESHELL:
+
+GEN_PASSWORD=$(shell (/usr/bin/apg -n1 -m32))
IMAGE_TAG = garm-build
-USER_ID=$(shell ((docker --version | grep -q podman) && echo "0" || id -u))
-USER_GROUP=$(shell ((docker --version | grep -q podman) && echo "0" || id -g))
+IMAGE_BUILDER=$(shell (which docker || which podman))
+IS_PODMAN=$(shell (($(IMAGE_BUILDER) --version | grep -q podman) && echo "yes" || echo "no"))
+USER_ID=$(if $(filter yes,$(IS_PODMAN)),0,$(shell id -u))
+USER_GROUP=$(if $(filter yes,$(IS_PODMAN)),0,$(shell id -g))
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
GOPATH ?= $(shell go env GOPATH)
VERSION ?= $(shell git describe --tags --match='v[0-9]*' --dirty --always)
+GARM_REF ?= $(shell git rev-parse --abbrev-ref HEAD)
GO ?= go
+export GARM_PASSWORD ?= ${GEN_PASSWORD}
+export REPO_WEBHOOK_SECRET = ${GEN_PASSWORD}
+export ORG_WEBHOOK_SECRET = ${GEN_PASSWORD}
+export CREDENTIALS_NAME ?= test-garm-creds
+export WORKFLOW_FILE_NAME ?= test.yml
+export GARM_ADMIN_USERNAME ?= admin
+
+ifeq ($(IS_PODMAN),yes)
+ EXTRA_ARGS := -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt
+endif
+
+
+.PHONY: help
+help: ## Display this help.
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
default: build
-.PHONY : build-static test install-lint-deps lint go-test fmt fmtcheck verify-vendor verify
-build-static:
- @echo Building garm
- docker build --tag $(IMAGE_TAG) .
- docker run --rm -e USER_ID=$(USER_ID) -e USER_GROUP=$(USER_GROUP) -v $(PWD):/build/garm:z $(IMAGE_TAG) /build-static.sh
- @echo Binaries are available in $(PWD)/bin
+##@ Build
-build:
+.PHONY : build-static test install-lint-deps lint go-test fmt fmtcheck verify-vendor verify create-release-files release
+build-static: ## Build garm statically
+ @echo Building garm
+ $(IMAGE_BUILDER) build $(EXTRA_ARGS) --tag $(IMAGE_TAG) -f Dockerfile.build-static .
+ mkdir -p build
+ $(IMAGE_BUILDER) run --rm -e USER_ID=$(USER_ID) -e GARM_REF=$(GARM_REF) -e USER_GROUP=$(USER_GROUP) -v $(PWD)/build:/build/output:z $(IMAGE_TAG) /build-static.sh
+ @echo Binaries are available in $(PWD)/build
+
+clean: ## Clean up build artifacts
+ @rm -rf ./bin ./build ./release
+
+.PHONY: build
+build: ## Build garm
@echo Building garm ${VERSION}
$(shell mkdir -p ./bin)
- @$(GO) build -ldflags "-s -w -X main.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm ./cmd/garm
- @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/cmd/garm-cli/cmd.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm-cli ./cmd/garm-cli
+ @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm ./cmd/garm
+ @$(GO) build -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=${VERSION}" -tags osusergo,netgo,sqlite_omit_load_extension -o bin/garm-cli ./cmd/garm-cli
@echo Binaries are available in $(PWD)/bin
-test: verify go-test
+.PHONY: build-webui
+build-webui:
+ @echo Building GARM web ui
+ ./build-webapp.sh
+ rm -rf webapp/assets/_app
+ cp -r webapp/build/* webapp/assets/
-install-lint-deps:
- @$(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+.PHONY: generate
+generate: ## Run go generate after checking required tools are in PATH
+ @echo Checking required tools...
+ @which openapi-generator-cli > /dev/null || (echo "Error: openapi-generator-cli not found in PATH" && exit 1)
+ @echo Running go generate
+ @$(GO) generate ./...
-lint:
- @golangci-lint run --timeout=8m --build-tags testing
+test: verify go-test ## Run tests
-go-test:
- @$(GO) test -race -mod=vendor -tags testing -v $(TEST_ARGS) -timeout=15m -parallel=4 -count=1 ./...
+##@ Release
+create-release-files:
+ ./scripts/make-release.sh
-fmt:
- @$(GO) fmt $$(go list ./...)
+release: build-static create-release-files ## Create a release
-fmtcheck:
- @gofmt -l -s $$(go list ./... | sed 's|github.com/cloudbase/garm/||g') | grep ".*\.go"; if [ "$$?" -eq 0 ]; then echo "gofmt check failed; please run gofmt -w -s"; exit 1;fi
+##@ Lint / Verify
+.PHONY: lint
+lint: golangci-lint $(GOLANGCI_LINT) ## Run linting.
+ $(GOLANGCI_LINT) run -v --build-tags=testing,integration $(GOLANGCI_LINT_EXTRA_ARGS)
+
+.PHONY: lint-fix
+lint-fix: golangci-lint $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter
+ GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint
verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date
$(eval TMPDIR := $(shell mktemp -d))
- @cp -R ${ROOTDIR} ${TMPDIR}
+ @cp -R ${ROOTDIR} ${TMPDIR}/.
@(cd ${TMPDIR}/garm && ${GO} mod tidy)
@diff -r -u -q ${ROOTDIR} ${TMPDIR}/garm >/dev/null 2>&1; if [ "$$?" -ne 0 ];then echo "please run: go mod tidy && go mod vendor"; exit 1; fi
@rm -rf ${TMPDIR}
-verify: verify-vendor lint fmtcheck
+verify: verify-vendor lint fmtcheck ## Run all verify-* targets
+
+integration: build ## Run integration tests
+ function cleanup {
+ if [ -e "$$GITHUB_ENV" ];then
+ source $$GITHUB_ENV
+ fi
+ ./test/integration/scripts/taredown_garm.sh
+ $(GO) run ./test/integration/gh_cleanup/main.go
+ }
+ trap cleanup EXIT
+ @./test/integration/scripts/setup-garm.sh
+ @$(GO) test -v ./test/integration/. -timeout=30m -tags=integration
+
+##@ Development
+
+go-test: ## Run tests
+ @$(GO) test -race -mod=vendor -tags testing -v $(TEST_ARGS) -timeout=15m -parallel=4 -count=1 ./...
+
+fmt: ## Run go fmt against code.
+ @$(GO) fmt $$(go list ./...)
+
+webui-test:
+ (cd webapp && npm install)
+ (cd webapp && npm run test:run)
+
+##@ Build Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+ mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
+
+## Tool Versions
+GOLANGCI_LINT_VERSION ?= v1.64.8
+
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. If wrong version is installed, it will be overwritten.
+$(GOLANGCI_LINT): $(LOCALBIN)
+ test -s $(LOCALBIN)/golangci-lint && $(LOCALBIN)/golangci-lint --version | grep -q $(GOLANGCI_LINT_VERSION) || \
+ GOBIN=$(LOCALBIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
diff --git a/README.md b/README.md
index 6553e1aa..24fbbcc4 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,59 @@
-# GitHub Actions Runner Manager (garm)
+
+
+
+
+
+
+# GitHub Actions Runner Manager (GARM)
[](https://github.com/cloudbase/garm/actions/workflows/go-tests.yml)
-Welcome to garm!
+
-Garm enables you to create and automatically maintain pools of [self-hosted GitHub runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners), with autoscaling that can be used inside your github workflow runs.
+- [GitHub Actions Runner Manager GARM](#github-actions-runner-manager-garm)
+ - [About GARM](#about-garm)
+ - [Join us on slack](#join-us-on-slack)
+ - [Installing](#installing)
+ - [Quickstart](#quickstart)
+ - [Installing on Kubernetes](#installing-on-kubernetes)
+ - [Configuring GARM for GHES](#configuring-garm-for-ghes)
+ - [Configuring GARM for Gitea](#configuring-garm-for-gitea)
+ - [Enabling the web UI](#enabling-the-web-ui)
+ - [Using GARM](#using-garm)
+ - [Supported providers](#supported-providers)
+ - [Installing external providers](#installing-external-providers)
+ - [Optimizing your runners](#optimizing-your-runners)
+ - [Write your own provider](#write-your-own-provider)
-The goal of ```garm``` is to be simple to set up, simple to configure and simple to use. It is a single binary that can run on any GNU/Linux machine without any other requirements other than the providers it creates the runners in. It is intended to be easy to deploy in any environment and can create runners in any system you can write a provider for. There is no complicated setup process and no extremely complex concepts to understand. Once set up, it's meant to stay out of your way.
+
-Garm supports creating pools on either GitHub itself or on your own deployment of [GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.5/admin/overview/about-github-enterprise-server). For instructions on how to use ```garm``` with GHE, see the [credentials](/doc/github_credentials.md) section of the documentation.
+## About GARM
+
+Welcome to GARM!
+
+GARM enables you to create and automatically maintain pools of self-hosted runners in both [Github](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) and [Gitea](https://github.com/go-gitea/gitea/) with auto-scaling that can be used inside your workflow runs.
+
+The goal of ```GARM``` is to be simple to set up, simple to configure and simple to use. The server itself is a single binary that can run on any GNU/Linux machine without any other requirements other than the providers you want to enable in your setup. It is intended to be easy to deploy in any environment and can create runners in virtually any system you can write a provider for (if one does not already exist). There is no complicated setup process and no extremely complex concepts to understand. Once set up, it's meant to stay out of your way.
+
+Through the use of providers, `GARM` can create runners in a variety of environments using the same `GARM` instance. Whether you want to create runners in your OpenStack cloud, your Azure cloud or your Kubernetes cluster, that is easily achieved by installing the appropriate providers, configuring them in `GARM` and creating pools that use them. You can create zero-runner pools for instances with high costs (large VMs, GPU enabled instances, etc) and have them spin up on demand, or you can create large pools of eagerly created k8s backed runners that can be used for your CI/CD pipelines at a moment's notice. You can mix them up and create pools in any combination of providers or resource allocations you want.
+
+GARM supports two modes of operation:
+
+* Pools
+* Scale sets
+
+Here is a brief architectural diagram of how pools work and how GARM reacts to workflows triggered in GitHub (click the image to see a larger version):
+
+
+
+
+**Scale sets** work differently. While pools (as they are defined in GARM) rely on webhooks to know when a job was started and GARM needs to internally make the right decision in terms of which pool should handle that runner, scale sets have a lot of the scheduling and decision making logic done in GitHub itself.
+
+> [!IMPORTANT]
+> The README and documentation in the `main` branch are relevant to the not yet released code that is present in `main`. Following the documentation from the `main` branch for a stable release of GARM may lead to errors. To view the documentation for the latest stable release, please switch to the appropriate tag. For information about setting up `v0.1.6`, please refer to the [v0.1.6 tag](https://github.com/cloudbase/garm/tree/v0.1.6).
+
+> [!CAUTION]
+> The `main` branch holds the latest code and is not guaranteed to be stable. If you are looking for a stable release, please check the releases page. If you plan to use the `main` branch, please do so on a new instance. Do not upgrade from a stable release to `main`.
## Join us on slack
@@ -18,120 +63,68 @@ Whether you're running into issues or just want to drop by and say "hi", feel fr
## Installing
-## Build from source
+### Quickstart
-You need to have Go installed, then run:
+Check out the [quickstart](/doc/quickstart.md) document for instructions on how to install ```GARM```. If you'd like to build from source, check out the [building from source](/doc/building_from_source.md) document.
- ```bash
- git clone https://github.com/cloudbase/garm
- cd garm
- go install ./...
- ```
+### Installing on Kubernetes
-You should now have both ```garm``` and ```garm-cli``` in your ```$GOPATH/bin``` folder.
+Thanks to the efforts of the amazing folks at [@mercedes-benz](https://github.com/mercedes-benz/), GARM can now be integrated into k8s via their operator. Check out the [GARM operator](https://github.com/mercedes-benz/garm-operator/) for more details.
-If you have docker/podman installed, you can also build statically linked binaries by running:
+## Configuring GARM for GHES
- ```bash
- make build-static
- ```
+GARM supports creating pools and scale sets in either GitHub itself or in your own deployment of [GitHub Enterprise Server](https://docs.github.com/en/enterprise-server@3.10/admin/overview/about-github-enterprise-server). For instructions on how to use ```GARM``` with GHE, see the [credentials](/doc/github_credentials.md) section of the documentation.
-The ```garm``` and ```garm-cli``` binaries will be built and copied to the ```bin/``` folder in your current working directory.
+## Configuring GARM for Gitea
-## Install the service
+GARM now has support for Gitea (>=1.24.0). For information on getting started with Gitea, see the [Gitea quickstart](/doc/gitea.md) document.
-Add a new system user:
+## Enabling the web UI
- ```bash
- useradd --shell /usr/bin/false \
- --system \
- --groups lxd \
- --no-create-home garm
- ```
+GARM now ships with a single page application. To enable it, add the following to your GARM config:
-The ```lxd``` group is only needed if you have a local LXD install and want to connect to the unix socket to use it. If you're connecting to a remote LXD server over TCP, you can skip adding the ```garm``` user to the ```lxd``` group.
+```toml
+[apiserver.webui]
+ enable = true
+```
-Copy the binary to somewhere in the system ```$PATH```:
+Check the [README.md](/webapp/README.md) file for details on the web UI.
- ```bash
- sudo cp $(go env GOPATH)/bin/garm /usr/local/bin/garm
- ```
+## Using GARM
-Or if you built garm using ```make```:
+GARM is designed with simplicity in mind. At least we try to keep it as simple as possible. We're aware that adding a new tool in your workflow can be painful, especially when you already have to deal with so many. The cognitive load for OPS has reached a level where it feels overwhelming at times to even wrap your head around a new tool. As such, we believe that tools should be simple, should take no more than a few hours to understand and set up and if you absolutely need to interact with the tool, it should be as intuitive as possible. Although we try our best to make this happen, we're aware that GARM has some rough edges, especially for new users. If you encounter issues or feel like the setup process was too complicated, please let us know. We're always looking to improve the user experience.
- ```bash
- sudo cp ./bin/garm /usr/local/bin/garm
- ```
+We've written a short introduction into some of the commands that GARM has and some of the concepts involved in setting up GARM, managing runners and how GitHub does some of the things it does.
-Create the config folder:
+[You can find it here](/doc/using_garm.md).
- ```bash
- sudo mkdir -p /etc/garm
- ```
+Please, feel free to [open an issue](https://github.com/cloudbase/garm/issues/new) if you find the documentation lacking and would like more info. Sometimes we forget the challenges that new users face as we're so close to the code and how it works. Any feedback is welcome and we're always looking to improve the documentation.
-Copy the config template:
+## Supported providers
- ```bash
- sudo cp ./testdata/config.toml /etc/garm/
- ```
+GARM uses providers to create runners in a particular IaaS. The providers are external executables that GARM calls into to create runners. Before you can create runners, you'll need to install at least one provider.
-Copy the external provider (optional):
+### Installing external providers
- ```bash
- sudo cp -a ./contrib/providers.d /etc/garm/
- ```
+External providers are binaries that GARM calls into to create runners in a particular IaaS. There are several external providers available:
-Copy the systemd service file:
+* [Akamai/Linode](https://github.com/flatcar/garm-provider-linode) - Experimental
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
+* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
- ```bash
- sudo cp ./contrib/garm.service /etc/systemd/system/
- ```
+Follow the instructions in the README of each provider to install them.
-Change permissions on config folder:
+## Optimizing your runners
- ```bash
- sudo chown -R garm:garm /etc/garm
- sudo chmod 750 -R /etc/garm
- ```
-
-Enable the service:
-
- ```bash
- sudo systemctl enable garm
- ```
-
-Customize the config in ```/etc/garm/config.toml```, and start the service:
-
- ```bash
- sudo systemctl start garm
- ```
-
-## Configuration
-
-The ```garm``` configuration is a simple ```toml```. A sample of the config file can be found in [the testdata folder](/testdata/config.toml).
-
-There are 3 major sections of the config that require your attention:
-
-* [Github credentials section](/doc/github_credentials.md)
-* [Providers section](/doc/providers.md)
-* [The database section](/doc/database.md)
-
-Once you've configured your database, providers and github credentials, you'll need to configure your [webhooks and the callback_url](/doc/webhooks_and_callbacks.md).
-
-At this point, you should be done. Have a look at the [running garm document](/doc/running_garm.md) for usage instructions and available features.
-
-If you would like to use ```garm``` with a different IaaS than the ones already available, have a loot at the [writing an external provider](/doc/external_provider.md) page.
-
-## Security considerations
-
-Garm does not apply any ACLs of any kind to the instances it creates. That task remains in the responsibility of the user. [Here is a guide for creating ACLs in LXD](https://linuxcontainers.org/lxd/docs/master/howto/network_acls/). You can of course use ```iptables``` or ```nftables``` to create any rules you wish. I recommend you create a separate isolated lxd bridge for runners, and secure it using ACLs/iptables/nftables.
-
-You must make sure that the code that runs as part of the workflows is trusted, and if that cannot be done, you must make sure that any malicious code that will be pulled in by the actions and run as part of a workload, is as contained as possible. There is a nice article about [securing your workflow runs here](https://blog.gitguardian.com/github-actions-security-cheat-sheet/).
+If you would like to optimize the startup time of new instance, take a look at the [performance considerations](/doc/performance_considerations.md) page.
## Write your own provider
-The providers are interfaces between ```garm``` and a particular IaaS in which we spin up GitHub Runners. These providers can be either **native** or **external**. The **native** providers are written in ```Go```, and must implement [the interface defined here](https://github.com/cloudbase/garm/blob/main/runner/common/provider.go#L22-L39). **External** providers can be written in any language, as they are in the form of an external executable that ```garm``` calls into.
-
-There is currently one **native** provider for [LXD](https://linuxcontainers.org/lxd/) and two **external** providers for [Openstack and Azure](/contrib/providers.d/).
-
-If you want to write your own provider, you can choose to write a native one, or implement an **external** one. The easiest one to write is probably an **external** provider. Please see the [Writing an external provider](/doc/external_provider.md) document for details. Also, feel free to inspect the two available external providers in this repository.
+The providers are interfaces between ```GARM``` and a particular IaaS in which we spin up GitHub Runners. **External** providers can be written in any language, as they are in the form of an external executable that ```GARM``` calls into. Please see the [Writing an external provider](/doc/external_provider.md) document for details. Also, feel free to inspect the two available sample external providers in this repository.
diff --git a/apiserver/controllers/controllers.go b/apiserver/controllers/controllers.go
index 51d497c2..019671eb 100644
--- a/apiserver/controllers/controllers.go
+++ b/apiserver/controllers/controllers.go
@@ -15,26 +15,60 @@
package controllers
import (
+ "context"
"encoding/json"
+ "errors"
+ "fmt"
"io"
- "log"
+ "log/slog"
"net/http"
+ "net/url"
"strings"
+ "github.com/gorilla/mux"
+ "github.com/gorilla/websocket"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/apiserver/params"
"github.com/cloudbase/garm/auth"
- gErrors "github.com/cloudbase/garm/errors"
+ "github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/metrics"
runnerParams "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner"
- "github.com/cloudbase/garm/util"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+ garmUtil "github.com/cloudbase/garm/util"
wsWriter "github.com/cloudbase/garm/websocket"
-
- "github.com/gorilla/websocket"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/workers/websocket/events"
)
-func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *wsWriter.Hub) (*APIController, error) {
+func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *wsWriter.Hub, apiCfg config.APIServer) (*APIController, error) {
+ controllerInfo, err := r.GetControllerInfo(auth.GetAdminContext(context.Background()))
+ if err != nil {
+ return nil, fmt.Errorf("failed to get controller info: %w", err)
+ }
+ var checkOrigin func(r *http.Request) bool
+ if len(apiCfg.CORSOrigins) > 0 {
+ checkOrigin = func(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ for _, val := range apiCfg.CORSOrigins {
+ corsVal, err := url.Parse(val)
+ if err != nil {
+ continue
+ }
+ if garmUtil.ASCIIEqualFold(u.Host, corsVal.Host) {
+ return true
+ }
+ }
+ return false
+ }
+ }
return &APIController{
r: r,
auth: authenticator,
@@ -42,37 +76,38 @@ func NewAPIController(r *runner.Runner, authenticator *auth.Authenticator, hub *
upgrader: websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 16384,
+ CheckOrigin: checkOrigin,
},
+ controllerID: controllerInfo.ControllerID.String(),
}, nil
}
type APIController struct {
- r *runner.Runner
- auth *auth.Authenticator
- hub *wsWriter.Hub
- upgrader websocket.Upgrader
+ r *runner.Runner
+ auth *auth.Authenticator
+ hub *wsWriter.Hub
+ upgrader websocket.Upgrader
+ controllerID string
}
-func handleError(w http.ResponseWriter, err error) {
- w.Header().Add("Content-Type", "application/json")
- origErr := errors.Cause(err)
+func handleError(ctx context.Context, w http.ResponseWriter, err error) {
+ w.Header().Set("Content-Type", "application/json")
apiErr := params.APIErrorResponse{
- Details: origErr.Error(),
+ Details: err.Error(),
}
-
- switch origErr.(type) {
- case *gErrors.NotFoundError:
+ switch {
+ case errors.Is(err, gErrors.ErrNotFound):
w.WriteHeader(http.StatusNotFound)
apiErr.Error = "Not Found"
- case *gErrors.UnauthorizedError:
+ case errors.Is(err, gErrors.ErrUnauthorized):
w.WriteHeader(http.StatusUnauthorized)
apiErr.Error = "Not Authorized"
// Don't include details on 401 errors.
apiErr.Details = ""
- case *gErrors.BadRequestError:
+ case errors.Is(err, gErrors.ErrBadRequest):
w.WriteHeader(http.StatusBadRequest)
apiErr.Error = "Bad Request"
- case *gErrors.DuplicateUserError, *gErrors.ConflictError:
+ case errors.Is(err, gErrors.ErrDuplicateEntity), errors.Is(err, &gErrors.ConflictError{}):
w.WriteHeader(http.StatusConflict)
apiErr.Error = "Conflict"
default:
@@ -83,72 +118,122 @@ func handleError(w http.ResponseWriter, err error) {
}
if err := json.NewEncoder(w).Encode(apiErr); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
-func (a *APIController) webhookMetricLabelValues(valid, reason string) []string {
- controllerInfo, err := a.r.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- log.Printf("failed to get controller info: %s", err)
- // If labels are empty, not attempt will be made to record webhook.
- return []string{}
- }
- return []string{
- valid, reason,
- controllerInfo.Hostname, controllerInfo.ControllerID.String(),
- }
-}
-
-func (a *APIController) handleWorkflowJobEvent(w http.ResponseWriter, r *http.Request) {
+func (a *APIController) handleWorkflowJobEvent(ctx context.Context, w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
- handleError(w, gErrors.NewBadRequestError("invalid post body: %s", err))
+ handleError(ctx, w, gErrors.NewBadRequestError("invalid post body: %s", err))
return
}
signature := r.Header.Get("X-Hub-Signature-256")
hookType := r.Header.Get("X-Github-Hook-Installation-Target-Type")
+ giteaTargetType := r.Header.Get("X-Gitea-Hook-Installation-Target-Type")
- var labelValues []string
- defer func() {
- if len(labelValues) == 0 {
+ forgeType := runnerParams.GithubEndpointType
+ if giteaTargetType != "" {
+ forgeType = runnerParams.GiteaEndpointType
+ hookType = giteaTargetType
+ }
+
+ if err := a.r.DispatchWorkflowJob(hookType, signature, forgeType, body); err != nil {
+ switch {
+ case errors.Is(err, gErrors.ErrNotFound):
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "owner_unknown", // label: reason
+ ).Inc()
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "got not found error from DispatchWorkflowJob. webhook not meant for us?")
return
- }
- if err := metrics.RecordWebhookWithLabels(labelValues...); err != nil {
- log.Printf("failed to record metric: %s", err)
- }
- }()
-
- if err := a.r.DispatchWorkflowJob(hookType, signature, body); err != nil {
- if errors.Is(err, gErrors.ErrNotFound) {
- labelValues = a.webhookMetricLabelValues("false", "owner_unknown")
- log.Printf("got not found error from DispatchWorkflowJob. webhook not meant for us?: %q", err)
- return
- } else if strings.Contains(err.Error(), "signature") { // TODO: check error type
- labelValues = a.webhookMetricLabelValues("false", "signature_invalid")
- } else {
- labelValues = a.webhookMetricLabelValues("false", "unknown")
+ case strings.Contains(err.Error(), "signature"):
+ // nolint:golangci-lint,godox TODO: check error type
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "signature_invalid", // label: reason
+ ).Inc()
+ default:
+ metrics.WebhooksReceived.WithLabelValues(
+ "false", // label: valid
+ "unknown", // label: reason
+ ).Inc()
}
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- labelValues = a.webhookMetricLabelValues("true", "")
+ metrics.WebhooksReceived.WithLabelValues(
+ "true", // label: valid
+ "", // label: reason
+ ).Inc()
}
-func (a *APIController) CatchAll(w http.ResponseWriter, r *http.Request) {
+func (a *APIController) WebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ controllerID, ok := vars["controllerID"]
+ // If the webhook URL includes a controller ID, we validate that it's meant for us. We still
+	// support bare webhook URLs, which are typically configured manually by the user.
+ // The controllerID suffixed webhook URL is useful when configuring the webhook for an entity
+ // via garm. We cannot tag a webhook URL on github, so there is no way to determine ownership.
+ // Using a controllerID suffix is a simple way to denote ownership.
+ if ok && controllerID != a.controllerID {
+ slog.InfoContext(ctx, "ignoring webhook meant for foreign controller", "req_controller_id", controllerID)
+ return
+ }
+
headers := r.Header.Clone()
event := runnerParams.Event(headers.Get("X-Github-Event"))
switch event {
case runnerParams.WorkflowJobEvent:
- a.handleWorkflowJobEvent(w, r)
+ a.handleWorkflowJobEvent(ctx, w, r)
+ case runnerParams.PingEvent:
+ // Ignore ping event. We may want to save the ping in the github entity table in the future.
default:
- log.Printf("ignoring unknown event %s", util.SanitizeLogEntry(string(event)))
+ slog.DebugContext(ctx, "ignoring unknown event", "gh_event", util.SanitizeLogEntry(string(event)))
+ }
+}
+
+func (a *APIController) EventsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ if !auth.IsAdmin(ctx) {
+ w.WriteHeader(http.StatusForbidden)
+ if _, err := w.Write([]byte("events are available to admin users")); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
return
}
+
+ conn, err := a.upgrader.Upgrade(w, r, nil)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error upgrading to websockets")
+ return
+ }
+ defer conn.Close()
+
+ wsClient, err := wsWriter.NewClient(ctx, conn)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new client")
+ return
+ }
+ defer wsClient.Stop()
+
+ eventHandler, err := events.NewHandler(ctx, wsClient)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new event handler")
+ return
+ }
+
+ if err := eventHandler.Start(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to start event handler")
+ return
+ }
+ <-eventHandler.Done()
}
func (a *APIController) WSHandler(writer http.ResponseWriter, req *http.Request) {
@@ -156,153 +241,267 @@ func (a *APIController) WSHandler(writer http.ResponseWriter, req *http.Request)
if !auth.IsAdmin(ctx) {
writer.WriteHeader(http.StatusForbidden)
if _, err := writer.Write([]byte("you need admin level access to view logs")); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if a.hub == nil {
- handleError(writer, gErrors.NewBadRequestError("log streamer is disabled"))
+ handleError(ctx, writer, gErrors.NewBadRequestError("log streamer is disabled"))
return
}
conn, err := a.upgrader.Upgrade(writer, req, nil)
if err != nil {
- log.Printf("error upgrading to websockets: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error upgrading to websockets")
return
}
+ defer conn.Close()
- // TODO (gsamfira): Handle ExpiresAt. Right now, if a client uses
- // a valid token to authenticate, and keeps the websocket connection
- // open, it will allow that client to stream logs via websockets
- // until the connection is broken. We need to forcefully disconnect
- // the client once the token expires.
- client, err := wsWriter.NewClient(conn, a.hub)
+ client, err := wsWriter.NewClient(ctx, conn)
if err != nil {
- log.Printf("failed to create new client: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create new client")
return
}
if err := a.hub.Register(client); err != nil {
- log.Printf("failed to register new client: %v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to register new client")
return
}
- client.Go()
+ defer a.hub.Unregister(client)
+
+ if err := client.Start(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to start client")
+ return
+ }
+ <-client.Done()
+ slog.Info("client disconnected", "client_id", client.ID())
}
// NotFoundHandler is returned when an invalid URL is acccessed
func (a *APIController) NotFoundHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
apiErr := params.APIErrorResponse{
Details: "Resource not found",
Error: "Not found",
}
- w.WriteHeader(http.StatusNotFound)
+
w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusNotFound)
if err := json.NewEncoder(w).Encode(apiErr); err != nil {
- log.Printf("failet to write response: %q", err)
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to write response")
}
}
+// swagger:route GET /metrics-token metrics-token GetMetricsToken
+//
+// Returns a JWT token that can be used to access the metrics endpoint.
+//
+// Responses:
+// 200: JWTResponse
+// 401: APIErrorResponse
func (a *APIController) MetricsTokenHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if !auth.IsAdmin(ctx) {
- handleError(w, gErrors.ErrUnauthorized)
+ handleError(ctx, w, gErrors.ErrUnauthorized)
return
}
token, err := a.auth.GetJWTMetricsToken(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(w).Encode(runnerParams.JWTResponse{Token: token})
if err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /auth/login login Login
+//
+// Logs in a user and returns a JWT token.
+//
+// Parameters:
+// + name: Body
+// description: Login information.
+// type: PasswordLoginParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: JWTResponse
+// 400: APIErrorResponse
+//
// LoginHandler returns a jwt token
func (a *APIController) LoginHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
var loginInfo runnerParams.PasswordLoginParams
if err := json.NewDecoder(r.Body).Decode(&loginInfo); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
if err := loginInfo.Validate(); err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- ctx := r.Context()
ctx, err := a.auth.AuthenticateUser(ctx, loginInfo)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
tokenString, err := a.auth.GetJWTToken(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(runnerParams.JWTResponse{Token: tokenString}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /first-run first-run FirstRun
+//
+// Initialize the first run of the controller.
+//
+// Parameters:
+// + name: Body
+// description: Create a new user.
+// type: NewUserParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: User
+// 400: APIErrorResponse
func (a *APIController) FirstRunHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
if a.auth.IsInitialized() {
err := gErrors.NewConflictError("already initialized")
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
- ctx := r.Context()
-
var newUserParams runnerParams.NewUserParams
if err := json.NewDecoder(r.Body).Decode(&newUserParams); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
newUser, err := a.auth.InitController(ctx, newUserParams)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(newUser); err != nil {
- log.Printf("failed to encode response: %q", err)
- }
-}
-
-func (a *APIController) ListCredentials(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- creds, err := a.r.ListCredentials(ctx)
- if err != nil {
- handleError(w, err)
- return
- }
-
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(creds); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /providers providers ListProviders
+//
+// List all providers.
+//
+// Responses:
+// 200: Providers
+// 400: APIErrorResponse
func (a *APIController) ListProviders(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
providers, err := a.r.ListProviders(ctx)
if err != nil {
- handleError(w, err)
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(providers); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /jobs jobs ListJobs
+//
+// List all jobs.
+//
+// Responses:
+// 200: Jobs
+// 400: APIErrorResponse
+func (a *APIController) ListAllJobs(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ jobs, err := a.r.ListAllJobs(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(jobs); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /controller-info controllerInfo ControllerInfo
+//
+// Get controller info.
+//
+// Responses:
+// 200: ControllerInfo
+// 409: APIErrorResponse
+func (a *APIController) ControllerInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ info, err := a.r.GetControllerInfo(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route PUT /controller controller UpdateController
+//
+// Update controller.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when updating the controller.
+// type: UpdateControllerParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ControllerInfo
+// 400: APIErrorResponse
+func (a *APIController) UpdateControllerHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ var updateParams runnerParams.UpdateControllerParams
+ if err := json.NewDecoder(r.Body).Decode(&updateParams); err != nil {
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := updateParams.Validate(); err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ info, err := a.r.UpdateController(ctx, updateParams)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/enterprises.go b/apiserver/controllers/enterprises.go
index 8614384d..b4b3e528 100644
--- a/apiserver/controllers/enterprises.go
+++ b/apiserver/controllers/enterprises.go
@@ -16,54 +16,106 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
- "github.com/cloudbase/garm/apiserver/params"
- gErrors "github.com/cloudbase/garm/errors"
- runnerParams "github.com/cloudbase/garm/params"
-
"github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
)
+// swagger:route POST /enterprises enterprises CreateEnterprise
+//
+// Create enterprise with the given parameters.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used to create the enterprise.
+// type: CreateEnterpriseParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Enterprise
+// default: APIErrorResponse
func (a *APIController) CreateEnterpriseHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var enterpriseData runnerParams.CreateEnterpriseParams
if err := json.NewDecoder(r.Body).Decode(&enterpriseData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
enterprise, err := a.r.CreateEnterprise(ctx, enterpriseData)
if err != nil {
- log.Printf("error creating enterprise: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /enterprises enterprises ListEnterprises
+//
+// List all enterprises.
+//
+// Parameters:
+// + name: name
+// description: Exact enterprise name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
+// Responses:
+// 200: Enterprises
+// default: APIErrorResponse
func (a *APIController) ListEnterprisesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- enterprise, err := a.r.ListEnterprises(ctx)
+ filter := runnerParams.EnterpriseFilter{
+ Name: r.URL.Query().Get("name"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ enterprise, err := a.r.ListEnterprises(ctx, filter)
if err != nil {
- log.Printf("listing enterprise: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /enterprises/{enterpriseID} enterprises GetEnterprise
+//
+// Get enterprise by ID.
+//
+// Parameters:
+// + name: enterpriseID
+// description: The ID of the enterprise to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Enterprise
+// default: APIErrorResponse
func (a *APIController) GetEnterpriseByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -75,24 +127,37 @@ func (a *APIController) GetEnterpriseByIDHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
enterprise, err := a.r.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- log.Printf("fetching enterprise: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /enterprises/{enterpriseID} enterprises DeleteEnterprise
+//
+// Delete enterprise by ID.
+//
+// Parameters:
+// + name: enterpriseID
+// description: ID of the enterprise to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteEnterpriseHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -104,22 +169,40 @@ func (a *APIController) DeleteEnterpriseHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteEnterprise(ctx, enterpriseID); err != nil {
- log.Printf("removing enterprise: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /enterprises/{enterpriseID} enterprises UpdateEnterprise
+//
+// Update enterprise with the given parameters.
+//
+// Parameters:
+// + name: enterpriseID
+// description: The ID of the enterprise to update.
+// type: string
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating the enterprise.
+// type: UpdateEntityParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Enterprise
+// default: APIErrorResponse
func (a *APIController) UpdateEnterpriseHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -131,30 +214,50 @@ func (a *APIController) UpdateEnterpriseHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- var updatePayload runnerParams.UpdateRepositoryParams
+ var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
enterprise, err := a.r.UpdateEnterprise(ctx, enterpriseID, updatePayload)
if err != nil {
- log.Printf("error updating enterprise: %s", err)
- handleError(w, err)
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating enterprise")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(enterprise); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /enterprises/{enterpriseID}/pools enterprises pools CreateEnterprisePool
+//
+// Create enterprise pool with the parameters given.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the enterprise pool.
+// type: CreatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) CreateEnterprisePoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -166,31 +269,101 @@ func (a *APIController) CreateEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateEnterprisePool(ctx, enterpriseID, poolData)
if err != nil {
- log.Printf("error creating enterprise pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /enterprises/{enterpriseID}/scalesets enterprises scalesets CreateEnterpriseScaleSet
+//
+// Create enterprise scale set with the parameters given.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the enterprise scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateEnterpriseScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ enterpriseID, ok := vars["enterpriseID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No enterprise ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scaleSetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeEnterprise, enterpriseID, scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /enterprises/{enterpriseID}/pools enterprises pools ListEnterprisePools
+//
+// List enterprise pools.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pools
+// default: APIErrorResponse
func (a *APIController) ListEnterprisePoolsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -201,25 +374,86 @@ func (a *APIController) ListEnterprisePoolsHandler(w http.ResponseWriter, r *htt
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListEnterprisePools(ctx, enterpriseID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
-
}
+// swagger:route GET /enterprises/{enterpriseID}/scalesets enterprises scalesets ListEnterpriseScaleSets
+//
+// List enterprise scale sets.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListEnterpriseScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ enterpriseID, ok := vars["enterpriseID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No enterprise ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeEnterprise, enterpriseID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /enterprises/{enterpriseID}/pools/{poolID} enterprises pools GetEnterprisePool
+//
+// Get enterprise pool by ID.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: Pool ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) GetEnterprisePoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -231,24 +465,43 @@ func (a *APIController) GetEnterprisePoolHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetEnterprisePoolByID(ctx, enterpriseID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /enterprises/{enterpriseID}/pools/{poolID} enterprises pools DeleteEnterprisePool
+//
+// Delete enterprise pool by ID.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the enterprise pool to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteEnterprisePoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -261,22 +514,47 @@ func (a *APIController) DeleteEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteEnterprisePool(ctx, enterpriseID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /enterprises/{enterpriseID}/pools/{poolID} enterprises pools UpdateEnterprisePool
+//
+// Update enterprise pool with the parameters given.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the enterprise pool to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when updating the enterprise pool.
+// type: UpdatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) UpdateEnterprisePoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -289,27 +567,27 @@ func (a *APIController) UpdateEnterprisePoolHandler(w http.ResponseWriter, r *ht
Error: "Bad Request",
Details: "No enterprise or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateEnterprisePool(ctx, enterpriseID, poolID, poolData)
if err != nil {
- log.Printf("error creating enterprise pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating enterprise pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/gitea_credentials.go b/apiserver/controllers/gitea_credentials.go
new file mode 100644
index 00000000..777be982
--- /dev/null
+++ b/apiserver/controllers/gitea_credentials.go
@@ -0,0 +1,241 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "math"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /gitea/credentials credentials ListGiteaCredentials
+//
+// List all credentials.
+//
+// Responses:
+// 200: Credentials
+// 400: APIErrorResponse
+func (a *APIController) ListGiteaCredentials(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ creds, err := a.r.ListGiteaCredentials(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(creds); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /gitea/credentials credentials CreateGiteaCredentials
+//
+// Create a Gitea credential.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a Gitea credential.
+// type: CreateGiteaCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) CreateGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGiteaCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.CreateGiteaCredentials(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/credentials/{id} credentials GetGiteaCredentials
+//
+// Get a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) GetGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.GetGiteaCredentials(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /gitea/credentials/{id} credentials DeleteGiteaCredentials
+//
+// Delete a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteGiteaCredentials(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /gitea/credentials/{id} credentials UpdateGiteaCredentials
+//
+// Update a Gitea credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the Gitea credential.
+// type: integer
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a Gitea credential.
+// type: UpdateGiteaCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) UpdateGiteaCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGiteaCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.UpdateGiteaCredentials(ctx, uint(id), params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update Gitea credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/gitea_endpoints.go b/apiserver/controllers/gitea_endpoints.go
new file mode 100644
index 00000000..67e85178
--- /dev/null
+++ b/apiserver/controllers/gitea_endpoints.go
@@ -0,0 +1,199 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route POST /gitea/endpoints endpoints CreateGiteaEndpoint
+//
+// Create a Gitea Endpoint.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a Gitea endpoint.
+// type: CreateGiteaEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) CreateGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGiteaEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.CreateGiteaEndpoint(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/endpoints endpoints ListGiteaEndpoints
+//
+// List all Gitea Endpoints.
+//
+// Responses:
+// 200: ForgeEndpoints
+// default: APIErrorResponse
+func (a *APIController) ListGiteaEndpoints(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ endpoints, err := a.r.ListGiteaEndpoints(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to list Gitea endpoints")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoints); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /gitea/endpoints/{name} endpoints GetGiteaEndpoint
+//
+// Get a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) GetGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ endpoint, err := a.r.GetGiteaEndpoint(ctx, name)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /gitea/endpoints/{name} endpoints DeleteGiteaEndpoint
+//
+// Delete a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ if err := a.r.DeleteGiteaEndpoint(ctx, name); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /gitea/endpoints/{name} endpoints UpdateGiteaEndpoint
+//
+// Update a Gitea Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the Gitea endpoint.
+// type: string
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a Gitea endpoint.
+// type: UpdateGiteaEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) UpdateGiteaEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGiteaEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.UpdateGiteaEndpoint(ctx, name, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update Gitea endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/github_credentials.go b/apiserver/controllers/github_credentials.go
new file mode 100644
index 00000000..04e087e5
--- /dev/null
+++ b/apiserver/controllers/github_credentials.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "math"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /credentials credentials ListCredentials
+// swagger:route GET /github/credentials credentials ListCredentials
+//
+// List all credentials.
+//
+// Responses:
+// 200: Credentials
+// 400: APIErrorResponse
+func (a *APIController) ListCredentials(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ creds, err := a.r.ListCredentials(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(creds); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /github/credentials credentials CreateCredentials
+//
+// Create a GitHub credential.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a GitHub credential.
+// type: CreateGithubCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) CreateGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGithubCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.CreateGithubCredentials(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/credentials/{id} credentials GetCredentials
+//
+// Get a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) GetGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.GetGithubCredentials(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /github/credentials/{id} credentials DeleteCredentials
+//
+// Delete a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteGithubCredentials(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /github/credentials/{id} credentials UpdateCredentials
+//
+// Update a GitHub credential.
+//
+// Parameters:
+// + name: id
+// description: ID of the GitHub credential.
+// type: integer
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a GitHub credential.
+// type: UpdateGithubCredentialsParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeCredentials
+// 400: APIErrorResponse
+func (a *APIController) UpdateGithubCredential(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ idParam, ok := vars["id"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing id in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ id, err := strconv.ParseUint(idParam, 10, 64)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if id > math.MaxUint {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "id is too large")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGithubCredentialsParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ cred, err := a.r.UpdateGithubCredentials(ctx, uint(id), params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update GitHub credential")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(cred); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/github_endpoints.go b/apiserver/controllers/github_endpoints.go
new file mode 100644
index 00000000..482f9d03
--- /dev/null
+++ b/apiserver/controllers/github_endpoints.go
@@ -0,0 +1,199 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// swagger:route POST /github/endpoints endpoints CreateGithubEndpoint
+//
+// Create a GitHub Endpoint.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating a GitHub endpoint.
+// type: CreateGithubEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) CreateGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var params params.CreateGithubEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.CreateGithubEndpoint(ctx, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to create GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/endpoints endpoints ListGithubEndpoints
+//
+// List all GitHub Endpoints.
+//
+// Responses:
+// 200: ForgeEndpoints
+// default: APIErrorResponse
+func (a *APIController) ListGithubEndpoints(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ endpoints, err := a.r.ListGithubEndpoints(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to list GitHub endpoints")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoints); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /github/endpoints/{name} endpoints GetGithubEndpoint
+//
+// Get a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) GetGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ endpoint, err := a.r.GetGithubEndpoint(ctx, name)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to get GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /github/endpoints/{name} endpoints DeleteGithubEndpoint
+//
+// Delete a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+ if err := a.r.DeleteGithubEndpoint(ctx, name); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// swagger:route PUT /github/endpoints/{name} endpoints UpdateGithubEndpoint
+//
+// Update a GitHub Endpoint.
+//
+// Parameters:
+// + name: name
+// description: The name of the GitHub endpoint.
+// type: string
+// in: path
+// required: true
+// + name: Body
+// description: Parameters used when updating a GitHub endpoint.
+// type: UpdateGithubEndpointParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ForgeEndpoint
+// default: APIErrorResponse
+func (a *APIController) UpdateGithubEndpoint(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ name, ok := vars["name"]
+ if !ok {
+ slog.ErrorContext(ctx, "missing name in request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ var params params.UpdateGithubEndpointParams
+ if err := json.NewDecoder(r.Body).Decode(¶ms); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode request")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ endpoint, err := a.r.UpdateGithubEndpoint(ctx, name, params)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to update GitHub endpoint")
+ handleError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(endpoint); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/instances.go b/apiserver/controllers/instances.go
index e563c8eb..3209a5c2 100644
--- a/apiserver/controllers/instances.go
+++ b/apiserver/controllers/instances.go
@@ -16,16 +16,31 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
-
- "github.com/cloudbase/garm/apiserver/params"
- gErrors "github.com/cloudbase/garm/errors"
- runnerParams "github.com/cloudbase/garm/params"
+ "strconv"
"github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
)
+// swagger:route GET /pools/{poolID}/instances instances ListPoolInstances
+//
+// List runner instances in a pool.
+//
+// Parameters:
+// + name: poolID
+// description: Runner pool ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
func (a *APIController) ListPoolInstancesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -36,24 +51,86 @@ func (a *APIController) ListPoolInstancesHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListPoolInstances(ctx, poolID)
if err != nil {
- log.Printf("listing pool instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pool instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /scalesets/{scalesetID}/instances instances ListScaleSetInstances
+//
+// List runner instances in a scale set.
+//
+// Parameters:
+// + name: scalesetID
+// description: Runner scale set ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
+func (a *APIController) ListScaleSetInstancesHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+			Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ instances, err := a.r.ListScaleSetInstances(ctx, uint(id))
+ if err != nil {
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale set instances")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(instances); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /instances/{instanceName} instances GetInstance
+//
+// Get runner instance by name.
+//
+// Parameters:
+// + name: instanceName
+// description: Runner instance name.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instance
+// default: APIErrorResponse
func (a *APIController) GetInstanceHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -64,24 +141,50 @@ func (a *APIController) GetInstanceHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No runner name specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instance, err := a.r.GetInstance(ctx, instanceName)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching instance")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instance); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /instances/{instanceName} instances DeleteInstance
+//
+// Delete runner instance by name.
+//
+// Parameters:
+// + name: instanceName
+// description: Runner instance name.
+// type: string
+// in: path
+// required: true
+//
+// + name: forceRemove
+// description: If true GARM will ignore any provider error when removing the runner and will continue to remove the runner from github and the GARM database.
+// type: boolean
+// in: query
+// required: false
+//
+// + name: bypassGHUnauthorized
+// description: If true GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful if you want to clean up runners and your credentials have expired.
+// type: boolean
+// in: query
+// required: false
+//
+// Responses:
+//
+// default: APIErrorResponse
func (a *APIController) DeleteInstanceHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -92,14 +195,16 @@ func (a *APIController) DeleteInstanceHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No instance name specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.ForceDeleteRunner(ctx, instanceName); err != nil {
- log.Printf("removing runner: %s", err)
- handleError(w, err)
+ forceRemove, _ := strconv.ParseBool(r.URL.Query().Get("forceRemove"))
+ bypassGHUnauthorized, _ := strconv.ParseBool(r.URL.Query().Get("bypassGHUnauthorized"))
+ if err := a.r.DeleteRunner(ctx, instanceName, forceRemove, bypassGHUnauthorized); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing runner")
+ handleError(ctx, w, err)
return
}
@@ -107,6 +212,20 @@ func (a *APIController) DeleteInstanceHandler(w http.ResponseWriter, r *http.Req
w.WriteHeader(http.StatusOK)
}
+// swagger:route GET /repositories/{repoID}/instances repositories instances ListRepoInstances
+//
+// List repository instances.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
func (a *APIController) ListRepoInstancesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -117,24 +236,38 @@ func (a *APIController) ListRepoInstancesHandler(w http.ResponseWriter, r *http.
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListRepoInstances(ctx, repoID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /organizations/{orgID}/instances organizations instances ListOrgInstances
+//
+// List organization instances.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
func (a *APIController) ListOrgInstancesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -145,24 +278,38 @@ func (a *APIController) ListOrgInstancesHandler(w http.ResponseWriter, r *http.R
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListOrgInstances(ctx, orgID)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /enterprises/{enterpriseID}/instances enterprises instances ListEnterpriseInstances
+//
+// List enterprise instances.
+//
+// Parameters:
+// + name: enterpriseID
+// description: Enterprise ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
func (a *APIController) ListEnterpriseInstancesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -173,37 +320,44 @@ func (a *APIController) ListEnterpriseInstancesHandler(w http.ResponseWriter, r
Error: "Bad Request",
Details: "No enterprise ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
instances, err := a.r.ListEnterpriseInstances(ctx, enterpriseID)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /instances instances ListInstances
+//
+// Get all runners' instances.
+//
+// Responses:
+// 200: Instances
+// default: APIErrorResponse
func (a *APIController) ListAllInstancesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
instances, err := a.r.ListAllInstances(ctx)
if err != nil {
- log.Printf("listing instances: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing instances")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(instances); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
@@ -212,14 +366,14 @@ func (a *APIController) InstanceStatusMessageHandler(w http.ResponseWriter, r *h
var updateMessage runnerParams.InstanceUpdateMessage
if err := json.NewDecoder(r.Body).Decode(&updateMessage); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
if err := a.r.AddInstanceStatusMessage(ctx, updateMessage); err != nil {
- log.Printf("error saving status message: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error saving status message")
+ handleError(ctx, w, err)
return
}
@@ -227,18 +381,22 @@ func (a *APIController) InstanceStatusMessageHandler(w http.ResponseWriter, r *h
w.WriteHeader(http.StatusOK)
}
-func (a *APIController) InstanceGithubRegistrationTokenHandler(w http.ResponseWriter, r *http.Request) {
+func (a *APIController) InstanceSystemInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- token, err := a.r.GetInstanceGithubRegistrationToken(ctx)
- if err != nil {
- handleError(w, err)
+ var updateMessage runnerParams.UpdateSystemInfoParams
+ if err := json.NewDecoder(r.Body).Decode(&updateMessage); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.UpdateSystemInfo(ctx, updateMessage); err != nil {
+		slog.With(slog.Any("error", err)).ErrorContext(ctx, "error saving system info")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
- if _, err := w.Write([]byte(token)); err != nil {
- log.Printf("failed to encode response: %q", err)
- }
}
diff --git a/apiserver/controllers/metadata.go b/apiserver/controllers/metadata.go
new file mode 100644
index 00000000..4b112b17
--- /dev/null
+++ b/apiserver/controllers/metadata.go
@@ -0,0 +1,125 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package controllers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+
+ "github.com/cloudbase/garm/apiserver/params"
+)
+
+func (a *APIController) InstanceGithubRegistrationTokenHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ token, err := a.r.GetInstanceGithubRegistrationToken(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write([]byte(token)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) JITCredentialsFileHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ fileName, ok := vars["fileName"]
+ if !ok {
+ w.WriteHeader(http.StatusNotFound)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Not Found",
+ Details: "Not Found",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ dotFileName := fmt.Sprintf(".%s", fileName)
+
+ data, err := a.r.GetJITConfigFile(ctx, dotFileName)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting JIT config file")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Note the leading dot in the filename
+ name := fmt.Sprintf("attachment; filename=%s", dotFileName)
+ w.Header().Set("Content-Disposition", name)
+	w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(data)))
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write(data); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) SystemdServiceNameHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ serviceName, err := a.r.GetRunnerServiceName(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write([]byte(serviceName)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) SystemdUnitFileHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ runAsUser := r.URL.Query().Get("runAsUser")
+
+ data, err := a.r.GenerateSystemdUnitFile(ctx, runAsUser)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ if _, err := w.Write(data); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+func (a *APIController) RootCertificateBundleHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ bundle, err := a.r.GetRootCertificateBundle(ctx)
+ if err != nil {
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(bundle); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/controllers/organizations.go b/apiserver/controllers/organizations.go
index 068549a0..9089f440 100644
--- a/apiserver/controllers/organizations.go
+++ b/apiserver/controllers/organizations.go
@@ -16,54 +16,107 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
-
- "github.com/cloudbase/garm/apiserver/params"
- gErrors "github.com/cloudbase/garm/errors"
- runnerParams "github.com/cloudbase/garm/params"
+ "strconv"
"github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
)
+// swagger:route POST /organizations organizations CreateOrg
+//
+// Create organization with the parameters given.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating the organization.
+// type: CreateOrgParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Organization
+// default: APIErrorResponse
func (a *APIController) CreateOrgHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- var repoData runnerParams.CreateOrgParams
- if err := json.NewDecoder(r.Body).Decode(&repoData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ var orgData runnerParams.CreateOrgParams
+ if err := json.NewDecoder(r.Body).Decode(&orgData); err != nil {
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
- repo, err := a.r.CreateOrganization(ctx, repoData)
+ org, err := a.r.CreateOrganization(ctx, orgData)
if err != nil {
- log.Printf("error creating repository: %+v", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ if err := json.NewEncoder(w).Encode(org); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /organizations organizations ListOrgs
+//
+// List organizations.
+//
+// Parameters:
+// + name: name
+// description: Exact organization name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
+// Responses:
+// 200: Organizations
+// default: APIErrorResponse
func (a *APIController) ListOrgsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- orgs, err := a.r.ListOrganizations(ctx)
+ filter := runnerParams.OrganizationFilter{
+ Name: r.URL.Query().Get("name"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ orgs, err := a.r.ListOrganizations(ctx, filter)
if err != nil {
- log.Printf("listing orgs: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing orgs")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(orgs); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /organizations/{orgID} organizations GetOrg
+//
+// Get organization by ID.
+//
+// Parameters:
+// + name: orgID
+// description: ID of the organization to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Organization
+// default: APIErrorResponse
func (a *APIController) GetOrgByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -75,24 +128,43 @@ func (a *APIController) GetOrgByIDHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
org, err := a.r.GetOrganizationByID(ctx, orgID)
if err != nil {
- log.Printf("fetching org: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching org")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(org); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /organizations/{orgID} organizations DeleteOrg
+//
+// Delete organization by ID.
+//
+// Parameters:
+// + name: orgID
+// description: ID of the organization to delete.
+// type: string
+// in: path
+// required: true
+//
+// + name: keepWebhook
+// description: If true and a webhook is installed for this organization, it will not be removed.
+// type: boolean
+// in: query
+// required: false
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteOrgHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -104,22 +176,43 @@ func (a *APIController) DeleteOrgHandler(w http.ResponseWriter, r *http.Request)
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.DeleteOrganization(ctx, orgID); err != nil {
- log.Printf("removing org: %+v", err)
- handleError(w, err)
+ keepWebhook, _ := strconv.ParseBool(r.URL.Query().Get("keepWebhook"))
+
+ if err := a.r.DeleteOrganization(ctx, orgID, keepWebhook); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing org")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /organizations/{orgID} organizations UpdateOrg
+//
+// Update organization with the parameters given.
+//
+// Parameters:
+// + name: orgID
+// description: ID of the organization to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when updating the organization.
+// type: UpdateEntityParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Organization
+// default: APIErrorResponse
func (a *APIController) UpdateOrgHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -131,30 +224,50 @@ func (a *APIController) UpdateOrgHandler(w http.ResponseWriter, r *http.Request)
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- var updatePayload runnerParams.UpdateRepositoryParams
+ var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
org, err := a.r.UpdateOrganization(ctx, orgID, updatePayload)
if err != nil {
- log.Printf("error updating organization: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating organization")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(org); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /organizations/{orgID}/pools organizations pools CreateOrgPool
+//
+// Create organization pool with the parameters given.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the organization pool.
+// type: CreatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) CreateOrgPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -166,31 +279,101 @@ func (a *APIController) CreateOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateOrgPool(ctx, orgID, poolData)
if err != nil {
- log.Printf("error creating organization pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /organizations/{orgID}/scalesets organizations scalesets CreateOrgScaleSet
+//
+// Create organization scale set with the parameters given.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the organization scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateOrgScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, ok := vars["orgID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scalesetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scalesetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeOrganization, orgID, scalesetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /organizations/{orgID}/pools organizations pools ListOrgPools
+//
+// List organization pools.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pools
+// default: APIErrorResponse
func (a *APIController) ListOrgPoolsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -201,53 +384,134 @@ func (a *APIController) ListOrgPoolsHandler(w http.ResponseWriter, r *http.Reque
Error: "Bad Request",
Details: "No org ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListOrgPools(ctx, orgID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /organizations/{orgID}/scalesets organizations scalesets ListOrgScaleSets
+//
+// List organization scale sets.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListOrgScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ orgID, ok := vars["orgID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeOrganization, orgID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /organizations/{orgID}/pools/{poolID} organizations pools GetOrgPool
+//
+// Get organization pool by ID.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: Pool ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) GetOrgPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
- orgID, repoOk := vars["orgID"]
+ orgID, orgOk := vars["orgID"]
poolID, poolOk := vars["poolID"]
- if !repoOk || !poolOk {
+ if !orgOk || !poolOk {
w.WriteHeader(http.StatusBadRequest)
if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetOrgPoolByID(ctx, orgID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /organizations/{orgID}/pools/{poolID} organizations pools DeleteOrgPool
+//
+// Delete organization pool by ID.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the organization pool to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteOrgPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -260,22 +524,47 @@ func (a *APIController) DeleteOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteOrgPool(ctx, orgID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /organizations/{orgID}/pools/{poolID} organizations pools UpdateOrgPool
+//
+// Update organization pool with the parameters given.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the organization pool to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when updating the organization pool.
+// type: UpdatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) UpdateOrgPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -288,27 +577,166 @@ func (a *APIController) UpdateOrgPoolHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No org or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateOrgPool(ctx, orgID, poolID, poolData)
if err != nil {
- log.Printf("error creating organization pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating organization pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /organizations/{orgID}/webhook organizations hooks InstallOrgWebhook
+//
+// Install the GARM webhook for an organization. The secret configured on the organization will
+// be used to validate the requests.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the organization webhook.
+// type: InstallWebhookParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) InstallOrgWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var hookParam runnerParams.InstallWebhookParams
+ if err := json.NewDecoder(r.Body).Decode(&hookParam); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ info, err := a.r.InstallOrgWebhook(ctx, orgID, hookParam)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "installing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /organizations/{orgID}/webhook organizations hooks UninstallOrgWebhook
+//
+// Uninstall organization webhook.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) UninstallOrgWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ if err := a.r.UninstallOrgWebhook(ctx, orgID); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route GET /organizations/{orgID}/webhook organizations hooks GetOrgWebhookInfo
+//
+// Get information about the GARM installed webhook on an organization.
+//
+// Parameters:
+// + name: orgID
+// description: Organization ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) GetOrgWebhookInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ orgID, orgOk := vars["orgID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No org ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ info, err := a.r.GetOrgWebhookInfo(ctx, orgID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting webhook info")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/pools.go b/apiserver/controllers/pools.go
index b4e5ffa2..901be588 100644
--- a/apiserver/controllers/pools.go
+++ b/apiserver/controllers/pools.go
@@ -16,33 +16,53 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
- "github.com/cloudbase/garm/apiserver/params"
- gErrors "github.com/cloudbase/garm/errors"
- runnerParams "github.com/cloudbase/garm/params"
-
"github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
)
+// swagger:route GET /pools pools ListPools
+//
+// List all pools.
+//
+// Responses:
+// 200: Pools
+// default: APIErrorResponse
func (a *APIController) ListAllPoolsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
pools, err := a.r.ListAllPools(ctx)
-
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /pools/{poolID} pools GetPool
+//
+// Get pool by ID.
+//
+// Parameters:
+// + name: poolID
+// description: ID of the pool to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) GetPoolByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -54,15 +74,15 @@ func (a *APIController) GetPoolByIDHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetPoolByID(ctx, poolID)
if err != nil {
- log.Printf("fetching pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching pool")
+ handleError(ctx, w, err)
return
}
@@ -70,10 +90,23 @@ func (a *APIController) GetPoolByIDHandler(w http.ResponseWriter, r *http.Reques
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /pools/{poolID} pools DeletePool
+//
+// Delete pool by ID.
+//
+// Parameters:
+// + name: poolID
+// description: ID of the pool to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeletePoolByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -85,14 +118,14 @@ func (a *APIController) DeletePoolByIDHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeletePoolByID(ctx, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
@@ -100,6 +133,26 @@ func (a *APIController) DeletePoolByIDHandler(w http.ResponseWriter, r *http.Req
w.WriteHeader(http.StatusOK)
}
+// swagger:route PUT /pools/{poolID} pools UpdatePool
+//
+// Update pool by ID.
+//
+// Parameters:
+// + name: poolID
+// description: ID of the pool to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters to update the pool with.
+// type: UpdatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) UpdatePoolByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -111,27 +164,27 @@ func (a *APIController) UpdatePoolByIDHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdatePoolByID(ctx, poolID, poolData)
if err != nil {
- log.Printf("fetching pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/repositories.go b/apiserver/controllers/repositories.go
index da5200e3..f3675790 100644
--- a/apiserver/controllers/repositories.go
+++ b/apiserver/controllers/repositories.go
@@ -16,54 +16,114 @@ package controllers
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
-
- "github.com/cloudbase/garm/apiserver/params"
- gErrors "github.com/cloudbase/garm/errors"
- runnerParams "github.com/cloudbase/garm/params"
+ "strconv"
"github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
)
+// swagger:route POST /repositories repositories CreateRepo
+//
+// Create repository with the parameters given.
+//
+// Parameters:
+// + name: Body
+// description: Parameters used when creating the repository.
+// type: CreateRepoParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Repository
+// default: APIErrorResponse
func (a *APIController) CreateRepoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var repoData runnerParams.CreateRepoParams
if err := json.NewDecoder(r.Body).Decode(&repoData); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
repo, err := a.r.CreateRepository(ctx, repoData)
if err != nil {
- log.Printf("error creating repository: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /repositories repositories ListRepos
+//
+// List repositories.
+//
+// Parameters:
+// + name: owner
+// description: Exact owner name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: name
+// description: Exact repository name to filter by
+// type: string
+// in: query
+// required: false
+//
+// + name: endpoint
+// description: Exact endpoint name to filter by
+// type: string
+// in: query
+// required: false
+//
+// Responses:
+// 200: Repositories
+// default: APIErrorResponse
func (a *APIController) ListReposHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
- repos, err := a.r.ListRepositories(ctx)
+ filter := runnerParams.RepositoryFilter{
+ Name: r.URL.Query().Get("name"),
+ Owner: r.URL.Query().Get("owner"),
+ Endpoint: r.URL.Query().Get("endpoint"),
+ }
+ repos, err := a.r.ListRepositories(ctx, filter)
if err != nil {
- log.Printf("listing repos: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing repositories")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repos); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /repositories/{repoID} repositories GetRepo
+//
+// Get repository by ID.
+//
+// Parameters:
+// + name: repoID
+// description: ID of the repository to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Repository
+// default: APIErrorResponse
func (a *APIController) GetRepoByIDHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -75,24 +135,43 @@ func (a *APIController) GetRepoByIDHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
repo, err := a.r.GetRepositoryByID(ctx, repoID)
if err != nil {
- log.Printf("fetching repo: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /repositories/{repoID} repositories DeleteRepo
+//
+// Delete repository by ID.
+//
+// Parameters:
+// + name: repoID
+// description: ID of the repository to delete.
+// type: string
+// in: path
+// required: true
+//
+// + name: keepWebhook
+// description: If true and a webhook is installed for this repo, it will not be removed.
+// type: boolean
+// in: query
+// required: false
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteRepoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -104,22 +183,42 @@ func (a *APIController) DeleteRepoHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- if err := a.r.DeleteRepository(ctx, repoID); err != nil {
- log.Printf("fetching repo: %s", err)
- handleError(w, err)
+ keepWebhook, _ := strconv.ParseBool(r.URL.Query().Get("keepWebhook"))
+ if err := a.r.DeleteRepository(ctx, repoID, keepWebhook); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /repositories/{repoID} repositories UpdateRepo
+//
+// Update repository with the parameters given.
+//
+// Parameters:
+// + name: repoID
+// description: ID of the repository to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when updating the repository.
+// type: UpdateEntityParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Repository
+// default: APIErrorResponse
func (a *APIController) UpdateRepoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -131,30 +230,50 @@ func (a *APIController) UpdateRepoHandler(w http.ResponseWriter, r *http.Request
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- var updatePayload runnerParams.UpdateRepositoryParams
+ var updatePayload runnerParams.UpdateEntityParams
if err := json.NewDecoder(r.Body).Decode(&updatePayload); err != nil {
- handleError(w, gErrors.ErrBadRequest)
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
repo, err := a.r.UpdateRepository(ctx, repoID, updatePayload)
if err != nil {
- log.Printf("error updating repository: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error updating repository")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(repo); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /repositories/{repoID}/pools repositories pools CreateRepoPool
+//
+// Create repository pool with the parameters given.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the repository pool.
+// type: CreatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) CreateRepoPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -166,31 +285,101 @@ func (a *APIController) CreateRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.CreatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.CreateRepoPool(ctx, repoID, poolData)
if err != nil {
- log.Printf("error creating repository pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route POST /repositories/{repoID}/scalesets repositories scalesets CreateRepoScaleSet
+//
+// Create repository scale set with the parameters given.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the repository scale set.
+// type: CreateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) CreateRepoScaleSetHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, ok := vars["repoID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repo ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var scaleSetData runnerParams.CreateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.CreateEntityScaleSet(ctx, runnerParams.ForgeEntityTypeRepository, repoID, scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /repositories/{repoID}/pools repositories pools ListRepoPools
+//
+// List repository pools.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pools
+// default: APIErrorResponse
func (a *APIController) ListRepoPoolsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -201,24 +390,86 @@ func (a *APIController) ListRepoPoolsHandler(w http.ResponseWriter, r *http.Requ
Error: "Bad Request",
Details: "No repo ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pools, err := a.r.ListRepoPools(ctx, repoID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pools); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route GET /repositories/{repoID}/scalesets repositories scalesets ListRepoScaleSets
+//
+// List repository scale sets.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListRepoScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ vars := mux.Vars(r)
+ repoID, ok := vars["repoID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repo ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ scaleSets, err := a.r.ListEntityScaleSets(ctx, runnerParams.ForgeEntityTypeRepository, repoID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /repositories/{repoID}/pools/{poolID} repositories pools GetRepoPool
+//
+// Get repository pool by ID.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: Pool ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) GetRepoPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
@@ -230,24 +481,43 @@ func (a *APIController) GetRepoPoolHandler(w http.ResponseWriter, r *http.Reques
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
pool, err := a.r.GetRepoPoolByID(ctx, repoID, poolID)
if err != nil {
- log.Printf("listing pools: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing pools")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+// swagger:route DELETE /repositories/{repoID}/pools/{poolID} repositories pools DeleteRepoPool
+//
+// Delete repository pool by ID.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the repository pool to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
func (a *APIController) DeleteRepoPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -260,22 +530,47 @@ func (a *APIController) DeleteRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
if err := a.r.DeleteRepoPool(ctx, repoID, poolID); err != nil {
- log.Printf("removing pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
-
}
+// swagger:route PUT /repositories/{repoID}/pools/{poolID} repositories pools UpdateRepoPool
+//
+// Update repository pool with the parameters given.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: poolID
+// description: ID of the repository pool to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when updating the repository pool.
+// type: UpdatePoolParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: Pool
+// default: APIErrorResponse
func (a *APIController) UpdateRepoPoolHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -288,27 +583,166 @@ func (a *APIController) UpdateRepoPoolHandler(w http.ResponseWriter, r *http.Req
Error: "Bad Request",
Details: "No repo or pool ID specified",
}); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
var poolData runnerParams.UpdatePoolParams
if err := json.NewDecoder(r.Body).Decode(&poolData); err != nil {
- log.Printf("failed to decode: %s", err)
- handleError(w, gErrors.ErrBadRequest)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
return
}
pool, err := a.r.UpdateRepoPool(ctx, repoID, poolID, poolData)
if err != nil {
- log.Printf("error creating repository pool: %s", err)
- handleError(w, err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "error creating repository pool")
+ handleError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(pool); err != nil {
- log.Printf("failed to encode response: %q", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route POST /repositories/{repoID}/webhook repositories hooks InstallRepoWebhook
+//
+// Install the GARM webhook for a repository. The secret configured on the repository will
+// be used to validate the requests.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters used when creating the repository webhook.
+// type: InstallWebhookParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) InstallRepoWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ var hookParam runnerParams.InstallWebhookParams
+ if err := json.NewDecoder(r.Body).Decode(&hookParam); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ info, err := a.r.InstallRepoWebhook(ctx, repoID, hookParam)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "installing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /repositories/{repoID}/webhook repositories hooks UninstallRepoWebhook
+//
+// Uninstall repository webhook.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) UninstallRepoWebhookHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ if err := a.r.UninstallRepoWebhook(ctx, repoID); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing webhook")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route GET /repositories/{repoID}/webhook repositories hooks GetRepoWebhookInfo
+//
+// Get information about the GARM installed webhook on a repository.
+//
+// Parameters:
+// + name: repoID
+// description: Repository ID.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: HookInfo
+// default: APIErrorResponse
+func (a *APIController) GetRepoWebhookInfoHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ repoID, orgOk := vars["repoID"]
+ if !orgOk {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No repository ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ info, err := a.r.GetRepoWebhookInfo(ctx, repoID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "getting webhook info")
+ handleError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(info); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
diff --git a/apiserver/controllers/scalesets.go b/apiserver/controllers/scalesets.go
new file mode 100644
index 00000000..1d26221b
--- /dev/null
+++ b/apiserver/controllers/scalesets.go
@@ -0,0 +1,211 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package controllers
+
+import (
+ "encoding/json"
+ "log/slog"
+ "net/http"
+ "strconv"
+
+ "github.com/gorilla/mux"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/apiserver/params"
+ runnerParams "github.com/cloudbase/garm/params"
+)
+
+// swagger:route GET /scalesets scalesets ListScalesets
+//
+// List all scalesets.
+//
+// Responses:
+// 200: ScaleSets
+// default: APIErrorResponse
+func (a *APIController) ListAllScaleSetsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ // Fetch every scale set; handleError maps backend errors to the
+ // appropriate HTTP status and API error body.
+ scalesets, err := a.r.ListAllScaleSets(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "listing scale sets")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Encode the result as JSON. A failure here can only be logged — the
+ // response status is committed once encoding starts writing.
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scalesets); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route GET /scalesets/{scalesetID} scalesets GetScaleSet
+//
+// Get scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to fetch.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) GetScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scaleSetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+ // The route variable is a string; scale set IDs are numeric. Bit size 32
+ // matches the uint(id) conversion below, so overly large IDs are rejected
+ // here as a bad request rather than silently truncated.
+ id, err := strconv.ParseUint(scaleSetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.GetScaleSetByID(ctx, uint(id))
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "fetching scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Populate the derived bootstrap-timeout field before returning the
+ // object. NOTE(review): RunnerTimeout() presumably applies a default when
+ // the stored value is unset — confirm against the params package.
+ scaleSet.RunnerBootstrapTimeout = scaleSet.RunnerTimeout()
+
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
+
+// swagger:route DELETE /scalesets/{scalesetID} scalesets DeleteScaleSet
+//
+// Delete scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to delete.
+// type: string
+// in: path
+// required: true
+//
+// Responses:
+// default: APIErrorResponse
+func (a *APIController) DeleteScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ // Scale set IDs are numeric; reject anything that does not fit a uint32
+ // (bit size matches the uint(id) conversion below).
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ if err := a.r.DeleteScaleSetByID(ctx, uint(id)); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "removing scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Success: 200 with an empty body. The Content-Type header mirrors the
+ // other handlers even though nothing is written.
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+}
+
+// swagger:route PUT /scalesets/{scalesetID} scalesets UpdateScaleSet
+//
+// Update scale set by ID.
+//
+// Parameters:
+// + name: scalesetID
+// description: ID of the scale set to update.
+// type: string
+// in: path
+// required: true
+//
+// + name: Body
+// description: Parameters to update the scale set with.
+// type: UpdateScaleSetParams
+// in: body
+// required: true
+//
+// Responses:
+// 200: ScaleSet
+// default: APIErrorResponse
+func (a *APIController) UpdateScaleSetByIDHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ vars := mux.Vars(r)
+ scalesetID, ok := vars["scalesetID"]
+ if !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ if err := json.NewEncoder(w).Encode(params.APIErrorResponse{
+ Error: "Bad Request",
+ Details: "No scale set ID specified",
+ }); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
+ // Scale set IDs are numeric; bit size 32 matches the uint(id) conversion.
+ id, err := strconv.ParseUint(scalesetID, 10, 32)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to parse id")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ // Decode the update parameters from the request body; malformed JSON is
+ // reported as a bad request.
+ var scaleSetData runnerParams.UpdateScaleSetParams
+ if err := json.NewDecoder(r.Body).Decode(&scaleSetData); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to decode")
+ handleError(ctx, w, gErrors.ErrBadRequest)
+ return
+ }
+
+ scaleSet, err := a.r.UpdateScaleSetByID(ctx, uint(id), scaleSetData)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "updating scale set")
+ handleError(ctx, w, err)
+ return
+ }
+
+ // Return the updated object so the client sees the effective state.
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(scaleSet); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+}
diff --git a/apiserver/params/params.go b/apiserver/params/params.go
index 23283a07..ec42fab6 100644
--- a/apiserver/params/params.go
+++ b/apiserver/params/params.go
@@ -14,6 +14,7 @@
package params
+// swagger:model APIErrorResponse
// APIErrorResponse holds information about an error, returned by the API
type APIErrorResponse struct {
Error string `json:"error"`
@@ -36,4 +37,9 @@ var (
Error: "init_required",
Details: "Missing superuser",
}
+ // URLsRequired is returned if the controller does not have the required URLs
+ URLsRequired = APIErrorResponse{
+ Error: "urls_required",
+ Details: "Missing required URLs. Make sure you update the metadata and callback URLs",
+ }
)
diff --git a/apiserver/routers/routers.go b/apiserver/routers/routers.go
index bac3f98d..ff241165 100644
--- a/apiserver/routers/routers.go
+++ b/apiserver/routers/routers.go
@@ -12,18 +12,53 @@
// License for the specific language governing permissions and limitations
// under the License.
+// Package routers Garm API.
+//
+// The Garm API generated using go-swagger.
+//
+// BasePath: /api/v1
+// Version: 1.0.0
+// License: Apache 2.0 https://www.apache.org/licenses/LICENSE-2.0
+//
+// Consumes:
+// - application/json
+//
+// Produces:
+// - application/json
+//
+// Security:
+// - Bearer:
+//
+// SecurityDefinitions:
+// Bearer:
+// type: apiKey
+// name: Authorization
+// in: header
+// description: >-
+// The token with the `Bearer: ` prefix, e.g. "Bearer abcde12345".
+//
+// swagger:meta
package routers
-import (
- "io"
- "net/http"
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate spec --input=../swagger-models.yaml --output=../swagger.yaml --include="routers|controllers"
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 validate ../swagger.yaml
+//go:generate rm -rf ../../client
+//go:generate go run github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate client --target=../../ --spec=../swagger.yaml
+import (
+ _ "expvar" // Register the expvar handlers
+ "log/slog"
+ "net/http"
+ _ "net/http/pprof" //nolint:golangci-lint,gosec // Register the pprof handlers
+
+ "github.com/felixge/httpsnoop"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/cloudbase/garm/apiserver/controllers"
"github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/util"
+ "github.com/cloudbase/garm/config"
+ spaAssets "github.com/cloudbase/garm/webapp/assets"
)
func WithMetricsRouter(parentRouter *mux.Router, disableAuth bool, metricsMiddlerware auth.Middleware) *mux.Router {
@@ -40,15 +75,67 @@ func WithMetricsRouter(parentRouter *mux.Router, disableAuth bool, metricsMiddle
return parentRouter
}
-func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddleware, initMiddleware, instanceMiddleware auth.Middleware) *mux.Router {
+// WithDebugServer mounts the pprof and expvar handlers (registered on
+// http.DefaultServeMux by their blank imports above) under /debug/pprof/.
+// Returns nil if parentRouter is nil. NOTE(review): this exposes profiling
+// data; the parent router should only be reachable by trusted clients.
+func WithDebugServer(parentRouter *mux.Router) *mux.Router {
+ if parentRouter == nil {
+ return nil
+ }
+
+ parentRouter.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux)
+ return parentRouter
+}
+
+// WithWebUI conditionally registers the embedded single-page web UI on
+// parentRouter, controlled by apiConfig.WebUI.EnableWebUI. When enabled,
+// "/" issues a 301 redirect to the configured webapp path, and GET requests
+// under that path are served by spaAssets.ServeSPAWithPath. Returns nil if
+// parentRouter is nil.
+func WithWebUI(parentRouter *mux.Router, apiConfig config.APIServer) *mux.Router {
+ if parentRouter == nil {
+ return nil
+ }
+
+ if apiConfig.WebUI.EnableWebUI {
+ slog.Info("WebUI is enabled, adding webapp routes")
+ webappPath := apiConfig.WebUI.GetWebappPath()
+ slog.Info("Using webapp path", "path", webappPath)
+ // Accessing / should redirect to the UI
+ parentRouter.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, webappPath, http.StatusMovedPermanently) // 301
+ })
+ // Serve the SPA with dynamic path
+ parentRouter.PathPrefix(webappPath).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ spaAssets.ServeSPAWithPath(w, r, webappPath)
+ }).Methods("GET")
+ } else {
+ slog.Info("WebUI is disabled, skipping webapp routes")
+ }
+
+ return parentRouter
+}
+
+// requestLogger is HTTP middleware that wraps h and emits one structured
+// "access_log" entry per request: method, URI, user agent, remote address,
+// status code, bytes written and total duration, captured via httpsnoop.
+func requestLogger(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // gathers metrics from the upstream handlers
+ metrics := httpsnoop.CaptureMetrics(h, w, r)
+
+ slog.Info(
+ "access_log",
+ slog.String("method", r.Method),
+ slog.String("uri", r.URL.RequestURI()),
+ slog.String("user_agent", r.Header.Get("User-Agent")),
+ slog.String("ip", r.RemoteAddr),
+ slog.Int("code", metrics.Code),
+ slog.Int64("bytes", metrics.Written),
+ slog.Duration("request_time", metrics.Duration),
+ )
+ })
+}
+
+func NewAPIRouter(han *controllers.APIController, authMiddleware, initMiddleware, urlsRequiredMiddleware, instanceMiddleware auth.Middleware, manageWebhooks bool) *mux.Router {
router := mux.NewRouter()
- logMiddleware := util.NewLoggingMiddleware(logWriter)
- router.Use(logMiddleware)
+ router.Use(requestLogger)
// Handles github webhooks
webhookRouter := router.PathPrefix("/webhooks").Subrouter()
- webhookRouter.PathPrefix("/").Handler(http.HandlerFunc(han.CatchAll))
- webhookRouter.PathPrefix("").Handler(http.HandlerFunc(han.CatchAll))
+ webhookRouter.Handle("/", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("/{controllerID}/", http.HandlerFunc(han.WebhookHandler))
+ webhookRouter.Handle("/{controllerID}", http.HandlerFunc(han.WebhookHandler))
// Handles API calls
apiSubRouter := router.PathPrefix("/api/v1").Subrouter()
@@ -56,30 +143,84 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
// FirstRunHandler
firstRunRouter := apiSubRouter.PathPrefix("/first-run").Subrouter()
firstRunRouter.Handle("/", http.HandlerFunc(han.FirstRunHandler)).Methods("POST", "OPTIONS")
+ firstRunRouter.Handle("", http.HandlerFunc(han.FirstRunHandler)).Methods("POST", "OPTIONS")
// Instance URLs
callbackRouter := apiSubRouter.PathPrefix("/callbacks").Subrouter()
callbackRouter.Handle("/status/", http.HandlerFunc(han.InstanceStatusMessageHandler)).Methods("POST", "OPTIONS")
callbackRouter.Handle("/status", http.HandlerFunc(han.InstanceStatusMessageHandler)).Methods("POST", "OPTIONS")
+ callbackRouter.Handle("/system-info/", http.HandlerFunc(han.InstanceSystemInfoHandler)).Methods("POST", "OPTIONS")
+ callbackRouter.Handle("/system-info", http.HandlerFunc(han.InstanceSystemInfoHandler)).Methods("POST", "OPTIONS")
callbackRouter.Use(instanceMiddleware.Middleware)
+ ///////////////////
+ // Metadata URLs //
+ ///////////////////
metadataRouter := apiSubRouter.PathPrefix("/metadata").Subrouter()
+ metadataRouter.Use(instanceMiddleware.Middleware)
+
+ // Registration token
metadataRouter.Handle("/runner-registration-token/", http.HandlerFunc(han.InstanceGithubRegistrationTokenHandler)).Methods("GET", "OPTIONS")
metadataRouter.Handle("/runner-registration-token", http.HandlerFunc(han.InstanceGithubRegistrationTokenHandler)).Methods("GET", "OPTIONS")
- metadataRouter.Use(instanceMiddleware.Middleware)
+ // JIT credential files
+ metadataRouter.Handle("/credentials/{fileName}/", http.HandlerFunc(han.JITCredentialsFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/credentials/{fileName}", http.HandlerFunc(han.JITCredentialsFileHandler)).Methods("GET", "OPTIONS")
+ // Systemd files
+ metadataRouter.Handle("/system/service-name/", http.HandlerFunc(han.SystemdServiceNameHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/service-name", http.HandlerFunc(han.SystemdServiceNameHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/systemd/unit-file/", http.HandlerFunc(han.SystemdUnitFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/systemd/unit-file", http.HandlerFunc(han.SystemdUnitFileHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/cert-bundle/", http.HandlerFunc(han.RootCertificateBundleHandler)).Methods("GET", "OPTIONS")
+ metadataRouter.Handle("/system/cert-bundle", http.HandlerFunc(han.RootCertificateBundleHandler)).Methods("GET", "OPTIONS")
+
// Login
authRouter := apiSubRouter.PathPrefix("/auth").Subrouter()
authRouter.Handle("/{login:login\\/?}", http.HandlerFunc(han.LoginHandler)).Methods("POST", "OPTIONS")
authRouter.Use(initMiddleware.Middleware)
+ //////////////////////////
+ // Controller endpoints //
+ //////////////////////////
+ controllerRouter := apiSubRouter.PathPrefix("/controller").Subrouter()
+ // The controller endpoints allow us to get information about the controller and update the URL endpoints.
+ // This endpoint must not be guarded by the urlsRequiredMiddleware as that would prevent the user from
+ // updating the URLs.
+ controllerRouter.Use(initMiddleware.Middleware)
+ controllerRouter.Use(authMiddleware.Middleware)
+ controllerRouter.Use(auth.AdminRequiredMiddleware)
+ // Get controller info
+ controllerRouter.Handle("/", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ controllerRouter.Handle("", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ // Update controller
+ controllerRouter.Handle("/", http.HandlerFunc(han.UpdateControllerHandler)).Methods("PUT", "OPTIONS")
+ controllerRouter.Handle("", http.HandlerFunc(han.UpdateControllerHandler)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////////////////
+ // API router for everything else //
+ ////////////////////////////////////
apiRouter := apiSubRouter.PathPrefix("").Subrouter()
apiRouter.Use(initMiddleware.Middleware)
+ // all endpoints except the controller endpoint should return an error
+ // if the required metadata, callback and webhook URLs are not set.
+ apiRouter.Use(urlsRequiredMiddleware.Middleware)
apiRouter.Use(authMiddleware.Middleware)
+ apiRouter.Use(auth.AdminRequiredMiddleware)
+
+ // Legacy controller path
+ apiRouter.Handle("/controller-info/", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/controller-info", http.HandlerFunc(han.ControllerInfoHandler)).Methods("GET", "OPTIONS")
// Metrics Token
apiRouter.Handle("/metrics-token/", http.HandlerFunc(han.MetricsTokenHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/metrics-token", http.HandlerFunc(han.MetricsTokenHandler)).Methods("GET", "OPTIONS")
+ //////////
+ // Jobs //
+ //////////
+ // List all jobs
+ apiRouter.Handle("/jobs/", http.HandlerFunc(han.ListAllJobs)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/jobs", http.HandlerFunc(han.ListAllJobs)).Methods("GET", "OPTIONS")
+
///////////
// Pools //
///////////
@@ -99,6 +240,25 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/pools/{poolID}/instances/", http.HandlerFunc(han.ListPoolInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/pools/{poolID}/instances", http.HandlerFunc(han.ListPoolInstancesHandler)).Methods("GET", "OPTIONS")
+ ////////////////
+ // Scale sets //
+ ////////////////
+ // List all scale sets
+ apiRouter.Handle("/scalesets/", http.HandlerFunc(han.ListAllScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets", http.HandlerFunc(han.ListAllScaleSetsHandler)).Methods("GET", "OPTIONS")
+ // Get one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.GetScaleSetByIDHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.GetScaleSetByIDHandler)).Methods("GET", "OPTIONS")
+ // Delete one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.DeleteScaleSetByIDHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.DeleteScaleSetByIDHandler)).Methods("DELETE", "OPTIONS")
+ // Update one scale set
+ apiRouter.Handle("/scalesets/{scalesetID}/", http.HandlerFunc(han.UpdateScaleSetByIDHandler)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}", http.HandlerFunc(han.UpdateScaleSetByIDHandler)).Methods("PUT", "OPTIONS")
+ // List scale set instances
+ apiRouter.Handle("/scalesets/{scalesetID}/instances/", http.HandlerFunc(han.ListScaleSetInstancesHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/scalesets/{scalesetID}/instances", http.HandlerFunc(han.ListScaleSetInstancesHandler)).Methods("GET", "OPTIONS")
+
/////////////
// Runners //
/////////////
@@ -131,6 +291,14 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/repositories/{repoID}/pools/", http.HandlerFunc(han.CreateRepoPoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/repositories/{repoID}/pools", http.HandlerFunc(han.CreateRepoPoolHandler)).Methods("POST", "OPTIONS")
+ // Create scale set
+ apiRouter.Handle("/repositories/{repoID}/scalesets/", http.HandlerFunc(han.CreateRepoScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/scalesets", http.HandlerFunc(han.CreateRepoScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List scale sets
+ apiRouter.Handle("/repositories/{repoID}/scalesets/", http.HandlerFunc(han.ListRepoScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/scalesets", http.HandlerFunc(han.ListRepoScaleSetsHandler)).Methods("GET", "OPTIONS")
+
// Repo instances list
apiRouter.Handle("/repositories/{repoID}/instances/", http.HandlerFunc(han.ListRepoInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/repositories/{repoID}/instances", http.HandlerFunc(han.ListRepoInstancesHandler)).Methods("GET", "OPTIONS")
@@ -151,6 +319,17 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/repositories/", http.HandlerFunc(han.CreateRepoHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/repositories", http.HandlerFunc(han.CreateRepoHandler)).Methods("POST", "OPTIONS")
+ if manageWebhooks {
+ // Install Webhook
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.InstallRepoWebhookHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.InstallRepoWebhookHandler)).Methods("POST", "OPTIONS")
+ // Uninstall Webhook
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.UninstallRepoWebhookHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.UninstallRepoWebhookHandler)).Methods("DELETE", "OPTIONS")
+ // Get webhook info
+ apiRouter.Handle("/repositories/{repoID}/webhook/", http.HandlerFunc(han.GetRepoWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/repositories/{repoID}/webhook", http.HandlerFunc(han.GetRepoWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ }
/////////////////////////////
// Organizations and pools //
/////////////////////////////
@@ -170,7 +349,15 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/organizations/{orgID}/pools/", http.HandlerFunc(han.CreateOrgPoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/organizations/{orgID}/pools", http.HandlerFunc(han.CreateOrgPoolHandler)).Methods("POST", "OPTIONS")
- // Repo instances list
+ // Create org scale set
+ apiRouter.Handle("/organizations/{orgID}/scalesets/", http.HandlerFunc(han.CreateOrgScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/scalesets", http.HandlerFunc(han.CreateOrgScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List org scale sets
+ apiRouter.Handle("/organizations/{orgID}/scalesets/", http.HandlerFunc(han.ListOrgScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/scalesets", http.HandlerFunc(han.ListOrgScaleSetsHandler)).Methods("GET", "OPTIONS")
+
+ // Org instances list
apiRouter.Handle("/organizations/{orgID}/instances/", http.HandlerFunc(han.ListOrgInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/organizations/{orgID}/instances", http.HandlerFunc(han.ListOrgInstancesHandler)).Methods("GET", "OPTIONS")
@@ -190,6 +377,17 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/organizations/", http.HandlerFunc(han.CreateOrgHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/organizations", http.HandlerFunc(han.CreateOrgHandler)).Methods("POST", "OPTIONS")
+ if manageWebhooks {
+ // Install Webhook
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.InstallOrgWebhookHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.InstallOrgWebhookHandler)).Methods("POST", "OPTIONS")
+ // Uninstall Webhook
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.UninstallOrgWebhookHandler)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.UninstallOrgWebhookHandler)).Methods("DELETE", "OPTIONS")
+ // Get webhook info
+ apiRouter.Handle("/organizations/{orgID}/webhook/", http.HandlerFunc(han.GetOrgWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/organizations/{orgID}/webhook", http.HandlerFunc(han.GetOrgWebhookInfoHandler)).Methods("GET", "OPTIONS")
+ }
/////////////////////////////
// Enterprises and pools //
/////////////////////////////
@@ -209,33 +407,131 @@ func NewAPIRouter(han *controllers.APIController, logWriter io.Writer, authMiddl
apiRouter.Handle("/enterprises/{enterpriseID}/pools/", http.HandlerFunc(han.CreateEnterprisePoolHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}/pools", http.HandlerFunc(han.CreateEnterprisePoolHandler)).Methods("POST", "OPTIONS")
- // Repo instances list
+ // Create enterprise scale sets
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets/", http.HandlerFunc(han.CreateEnterpriseScaleSetHandler)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets", http.HandlerFunc(han.CreateEnterpriseScaleSetHandler)).Methods("POST", "OPTIONS")
+
+ // List enterprise scale sets
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets/", http.HandlerFunc(han.ListEnterpriseScaleSetsHandler)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/enterprises/{enterpriseID}/scalesets", http.HandlerFunc(han.ListEnterpriseScaleSetsHandler)).Methods("GET", "OPTIONS")
+
+ // Enterprise instances list
apiRouter.Handle("/enterprises/{enterpriseID}/instances/", http.HandlerFunc(han.ListEnterpriseInstancesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}/instances", http.HandlerFunc(han.ListEnterpriseInstancesHandler)).Methods("GET", "OPTIONS")
- // Get org
+ // Get enterprise
apiRouter.Handle("/enterprises/{enterpriseID}/", http.HandlerFunc(han.GetEnterpriseByIDHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}", http.HandlerFunc(han.GetEnterpriseByIDHandler)).Methods("GET", "OPTIONS")
- // Update org
+ // Update enterprise
apiRouter.Handle("/enterprises/{enterpriseID}/", http.HandlerFunc(han.UpdateEnterpriseHandler)).Methods("PUT", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}", http.HandlerFunc(han.UpdateEnterpriseHandler)).Methods("PUT", "OPTIONS")
- // Delete org
+ // Delete enterprise
apiRouter.Handle("/enterprises/{enterpriseID}/", http.HandlerFunc(han.DeleteEnterpriseHandler)).Methods("DELETE", "OPTIONS")
apiRouter.Handle("/enterprises/{enterpriseID}", http.HandlerFunc(han.DeleteEnterpriseHandler)).Methods("DELETE", "OPTIONS")
- // List orgs
+ // List enterprises
apiRouter.Handle("/enterprises/", http.HandlerFunc(han.ListEnterprisesHandler)).Methods("GET", "OPTIONS")
apiRouter.Handle("/enterprises", http.HandlerFunc(han.ListEnterprisesHandler)).Methods("GET", "OPTIONS")
- // Create org
+ // Create enterprise
apiRouter.Handle("/enterprises/", http.HandlerFunc(han.CreateEnterpriseHandler)).Methods("POST", "OPTIONS")
apiRouter.Handle("/enterprises", http.HandlerFunc(han.CreateEnterpriseHandler)).Methods("POST", "OPTIONS")
- // Credentials and providers
- apiRouter.Handle("/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
- apiRouter.Handle("/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // Providers
apiRouter.Handle("/providers/", http.HandlerFunc(han.ListProviders)).Methods("GET", "OPTIONS")
apiRouter.Handle("/providers", http.HandlerFunc(han.ListProviders)).Methods("GET", "OPTIONS")
- // Websocket log writer
- apiRouter.Handle("/{ws:ws\\/?}", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ //////////////////////
+ // Github Endpoints //
+ //////////////////////
+ // Create Github Endpoint
+ apiRouter.Handle("/github/endpoints/", http.HandlerFunc(han.CreateGithubEndpoint)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/github/endpoints", http.HandlerFunc(han.CreateGithubEndpoint)).Methods("POST", "OPTIONS")
+ // List Github Endpoints
+ apiRouter.Handle("/github/endpoints/", http.HandlerFunc(han.ListGithubEndpoints)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/endpoints", http.HandlerFunc(han.ListGithubEndpoints)).Methods("GET", "OPTIONS")
+ // Get Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.GetGithubEndpoint)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.GetGithubEndpoint)).Methods("GET", "OPTIONS")
+ // Delete Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.DeleteGithubEndpoint)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.DeleteGithubEndpoint)).Methods("DELETE", "OPTIONS")
+ // Update Github Endpoint
+ apiRouter.Handle("/github/endpoints/{name}/", http.HandlerFunc(han.UpdateGithubEndpoint)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/github/endpoints/{name}", http.HandlerFunc(han.UpdateGithubEndpoint)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////
+ // Github credentials //
+ ////////////////////////
+ // Legacy credentials path
+ apiRouter.Handle("/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // List Github Credentials
+ apiRouter.Handle("/github/credentials/", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/credentials", http.HandlerFunc(han.ListCredentials)).Methods("GET", "OPTIONS")
+ // Create Github Credentials
+ apiRouter.Handle("/github/credentials/", http.HandlerFunc(han.CreateGithubCredential)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/github/credentials", http.HandlerFunc(han.CreateGithubCredential)).Methods("POST", "OPTIONS")
+ // Get Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.GetGithubCredential)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.GetGithubCredential)).Methods("GET", "OPTIONS")
+ // Delete Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.DeleteGithubCredential)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.DeleteGithubCredential)).Methods("DELETE", "OPTIONS")
+ // Update Github Credential
+ apiRouter.Handle("/github/credentials/{id}/", http.HandlerFunc(han.UpdateGithubCredential)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/github/credentials/{id}", http.HandlerFunc(han.UpdateGithubCredential)).Methods("PUT", "OPTIONS")
+
+ //////////////////////
+ // Gitea Endpoints //
+ //////////////////////
+ // Create Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/", http.HandlerFunc(han.CreateGiteaEndpoint)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints", http.HandlerFunc(han.CreateGiteaEndpoint)).Methods("POST", "OPTIONS")
+ // List Gitea Endpoints
+ apiRouter.Handle("/gitea/endpoints/", http.HandlerFunc(han.ListGiteaEndpoints)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints", http.HandlerFunc(han.ListGiteaEndpoints)).Methods("GET", "OPTIONS")
+ // Get Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.GetGiteaEndpoint)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.GetGiteaEndpoint)).Methods("GET", "OPTIONS")
+ // Delete Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.DeleteGiteaEndpoint)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.DeleteGiteaEndpoint)).Methods("DELETE", "OPTIONS")
+ // Update Gitea Endpoint
+ apiRouter.Handle("/gitea/endpoints/{name}/", http.HandlerFunc(han.UpdateGiteaEndpoint)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/gitea/endpoints/{name}", http.HandlerFunc(han.UpdateGiteaEndpoint)).Methods("PUT", "OPTIONS")
+
+ ////////////////////////
+ // Gitea credentials //
+ ////////////////////////
+ // List Gitea Credentials
+ apiRouter.Handle("/gitea/credentials/", http.HandlerFunc(han.ListGiteaCredentials)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials", http.HandlerFunc(han.ListGiteaCredentials)).Methods("GET", "OPTIONS")
+ // Create Gitea Credentials
+ apiRouter.Handle("/gitea/credentials/", http.HandlerFunc(han.CreateGiteaCredential)).Methods("POST", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials", http.HandlerFunc(han.CreateGiteaCredential)).Methods("POST", "OPTIONS")
+ // Get Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.GetGiteaCredential)).Methods("GET", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.GetGiteaCredential)).Methods("GET", "OPTIONS")
+ // Delete Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.DeleteGiteaCredential)).Methods("DELETE", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.DeleteGiteaCredential)).Methods("DELETE", "OPTIONS")
+ // Update Gitea Credential
+ apiRouter.Handle("/gitea/credentials/{id}/", http.HandlerFunc(han.UpdateGiteaCredential)).Methods("PUT", "OPTIONS")
+ apiRouter.Handle("/gitea/credentials/{id}", http.HandlerFunc(han.UpdateGiteaCredential)).Methods("PUT", "OPTIONS")
+
+ /////////////////////////
+ // Websocket endpoints //
+ /////////////////////////
+ // Legacy log websocket path
+ apiRouter.Handle("/ws/", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ apiRouter.Handle("/ws", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ // Log websocket endpoint
+ apiRouter.Handle("/ws/logs/", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ apiRouter.Handle("/ws/logs", http.HandlerFunc(han.WSHandler)).Methods("GET")
+ // DB watcher websocket endpoint
+ apiRouter.Handle("/ws/events/", http.HandlerFunc(han.EventsHandler)).Methods("GET")
+ apiRouter.Handle("/ws/events", http.HandlerFunc(han.EventsHandler)).Methods("GET")
+
+ // NotFound handler - this should be last
+ apiRouter.PathPrefix("/").HandlerFunc(han.NotFoundHandler).Methods("GET", "POST", "PUT", "DELETE", "OPTIONS")
return router
}
diff --git a/apiserver/swagger-models.yaml b/apiserver/swagger-models.yaml
new file mode 100644
index 00000000..74eaac84
--- /dev/null
+++ b/apiserver/swagger-models.yaml
@@ -0,0 +1,345 @@
+# NOTE: The purpose of these definitions is to reuse the existing golang
+# types from GARM packages.
+definitions:
+ User:
+ type: object
+ x-go-type:
+ type: User
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ HookInfo:
+ type: object
+ x-go-type:
+ type: HookInfo
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ControllerInfo:
+ type: object
+ x-go-type:
+ type: ControllerInfo
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ InstallWebhookParams:
+ type: object
+ x-go-type:
+ type: InstallWebhookParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ NewUserParams:
+ type: object
+ x-go-type:
+ type: NewUserParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ PasswordLoginParams:
+ type: object
+ x-go-type:
+ type: PasswordLoginParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ JWTResponse:
+ type: object
+ x-go-type:
+ type: JWTResponse
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Jobs:
+ type: array
+ x-go-type:
+ type: Jobs
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Job'
+ Job:
+ type: object
+ x-go-type:
+ type: Job
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Credentials:
+ type: array
+ x-go-type:
+ type: Credentials
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/ForgeCredentials'
+ ForgeCredentials:
+ type: object
+ x-go-type:
+ type: ForgeCredentials
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Providers:
+ type: array
+ x-go-type:
+ type: Providers
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Provider'
+ Provider:
+ type: object
+ x-go-type:
+ type: Provider
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Instances:
+ type: array
+ x-go-type:
+ type: Instances
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Instance'
+ Instance:
+ type: object
+ x-go-type:
+ type: Instance
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Pools:
+ type: array
+ x-go-type:
+ type: Pools
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Pool'
+ Pool:
+ type: object
+ x-go-type:
+ type: Pool
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ScaleSets:
+ type: array
+ x-go-type:
+ type: ScaleSets
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/ScaleSet'
+ ScaleSet:
+ type: object
+ x-go-type:
+ type: ScaleSet
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Repositories:
+ type: array
+ x-go-type:
+ type: Repositories
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Repository'
+ Repository:
+ type: object
+ x-go-type:
+ type: Repository
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateRepoParams:
+ type: object
+ x-go-type:
+ type: CreateRepoParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Organizations:
+ type: array
+ x-go-type:
+ type: Organizations
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Organization'
+ Organization:
+ type: object
+ x-go-type:
+ type: Organization
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateOrgParams:
+ type: object
+ x-go-type:
+ type: CreateOrgParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ Enterprises:
+ type: array
+ x-go-type:
+ type: Enterprises
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/Enterprise'
+ Enterprise:
+ type: object
+ x-go-type:
+ type: Enterprise
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateEnterpriseParams:
+ type: object
+ x-go-type:
+ type: CreateEnterpriseParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateEntityParams:
+ type: object
+ x-go-type:
+ type: UpdateEntityParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreatePoolParams:
+ type: object
+ x-go-type:
+ type: CreatePoolParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateScaleSetParams:
+ type: object
+ x-go-type:
+ type: CreateScaleSetParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdatePoolParams:
+ type: object
+ x-go-type:
+ type: UpdatePoolParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateScaleSetParams:
+ type: object
+ x-go-type:
+ type: UpdateScaleSetParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ APIErrorResponse:
+ type: object
+ x-go-type:
+ type: APIErrorResponse
+ import:
+ package: github.com/cloudbase/garm/apiserver/params
+ alias: apiserver_params
+ CreateInstanceParams:
+ type: object
+ x-go-type:
+ type: CreateInstanceParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGithubEndpointParams:
+ type: object
+ x-go-type:
+ type: UpdateGithubEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ type: UpdateGiteaEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ForgeEndpoint:
+ type: object
+ x-go-type:
+ type: ForgeEndpoint
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ ForgeEndpoints:
+ type: array
+ x-go-type:
+ type: ForgeEndpoints
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ items:
+ $ref: '#/definitions/ForgeEndpoint'
+ CreateGithubEndpointParams:
+ type: object
+ x-go-type:
+ type: CreateGithubEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ type: CreateGiteaEndpointParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ type: CreateGithubCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ CreateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ type: CreateGiteaCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ type: UpdateGithubCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ type: UpdateGiteaCredentialsParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
+ UpdateControllerParams:
+ type: object
+ x-go-type:
+ type: UpdateControllerParams
+ import:
+ package: github.com/cloudbase/garm/params
+ alias: garm_params
diff --git a/apiserver/swagger.yaml b/apiserver/swagger.yaml
new file mode 100644
index 00000000..bf02a2d7
--- /dev/null
+++ b/apiserver/swagger.yaml
@@ -0,0 +1,2280 @@
+basePath: /api/v1
+consumes:
+ - application/json
+definitions:
+ APIErrorResponse:
+ type: object
+ x-go-type:
+ import:
+ alias: apiserver_params
+ package: github.com/cloudbase/garm/apiserver/params
+ type: APIErrorResponse
+ ControllerInfo:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ControllerInfo
+ CreateEnterpriseParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateEnterpriseParams
+ CreateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGiteaCredentialsParams
+ CreateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGiteaEndpointParams
+ CreateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGithubCredentialsParams
+ CreateGithubEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateGithubEndpointParams
+ CreateInstanceParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateInstanceParams
+ CreateOrgParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateOrgParams
+ CreatePoolParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreatePoolParams
+ CreateRepoParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateRepoParams
+ CreateScaleSetParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: CreateScaleSetParams
+ Credentials:
+ items:
+ $ref: '#/definitions/ForgeCredentials'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Credentials
+ Enterprise:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Enterprise
+ Enterprises:
+ items:
+ $ref: '#/definitions/Enterprise'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Enterprises
+ ForgeCredentials:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ForgeCredentials
+ ForgeEndpoint:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ForgeEndpoint
+ ForgeEndpoints:
+ items:
+ $ref: '#/definitions/ForgeEndpoint'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ForgeEndpoints
+ HookInfo:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: HookInfo
+ InstallWebhookParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: InstallWebhookParams
+ Instance:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Instance
+ Instances:
+ items:
+ $ref: '#/definitions/Instance'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Instances
+ JWTResponse:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: JWTResponse
+ Job:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Job
+ Jobs:
+ items:
+ $ref: '#/definitions/Job'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Jobs
+ NewUserParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: NewUserParams
+ Organization:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Organization
+ Organizations:
+ items:
+ $ref: '#/definitions/Organization'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Organizations
+ PasswordLoginParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: PasswordLoginParams
+ Pool:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Pool
+ Pools:
+ items:
+ $ref: '#/definitions/Pool'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Pools
+ Provider:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Provider
+ Providers:
+ items:
+ $ref: '#/definitions/Provider'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Providers
+ Repositories:
+ items:
+ $ref: '#/definitions/Repository'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Repositories
+ Repository:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: Repository
+ ScaleSet:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ScaleSet
+ ScaleSets:
+ items:
+ $ref: '#/definitions/ScaleSet'
+ type: array
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: ScaleSets
+ UpdateControllerParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateControllerParams
+ UpdateEntityParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateEntityParams
+ UpdateGiteaCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGiteaCredentialsParams
+ UpdateGiteaEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGiteaEndpointParams
+ UpdateGithubCredentialsParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGithubCredentialsParams
+ UpdateGithubEndpointParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateGithubEndpointParams
+ UpdatePoolParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdatePoolParams
+ UpdateScaleSetParams:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: UpdateScaleSetParams
+ User:
+ type: object
+ x-go-type:
+ import:
+ alias: garm_params
+ package: github.com/cloudbase/garm/params
+ type: User
+info:
+ description: The Garm API generated using go-swagger.
+ license:
+ name: Apache 2.0
+ url: https://www.apache.org/licenses/LICENSE-2.0
+ title: Garm API.
+ version: 1.0.0
+paths:
+ /auth/login:
+ post:
+ operationId: Login
+ parameters:
+ - description: Login information.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/PasswordLoginParams'
+ description: Login information.
+ type: object
+ responses:
+ "200":
+ description: JWTResponse
+ schema:
+ $ref: '#/definitions/JWTResponse'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Logs in a user and returns a JWT token.
+ tags:
+ - login
+ /controller:
+ put:
+ operationId: UpdateController
+ parameters:
+ - description: Parameters used when updating the controller.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateControllerParams'
+ description: Parameters used when updating the controller.
+ type: object
+ responses:
+ "200":
+ description: ControllerInfo
+ schema:
+ $ref: '#/definitions/ControllerInfo'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update controller.
+ tags:
+ - controller
+ /controller-info:
+ get:
+ operationId: ControllerInfo
+ responses:
+ "200":
+ description: ControllerInfo
+ schema:
+ $ref: '#/definitions/ControllerInfo'
+ "409":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get controller info.
+ tags:
+ - controllerInfo
+ /enterprises:
+ get:
+ operationId: ListEnterprises
+ parameters:
+ - description: Exact enterprise name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
+ responses:
+ "200":
+ description: Enterprises
+ schema:
+ $ref: '#/definitions/Enterprises'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all enterprises.
+ tags:
+ - enterprises
+ post:
+ operationId: CreateEnterprise
+ parameters:
+ - description: Parameters used to create the enterprise.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateEnterpriseParams'
+ description: Parameters used to create the enterprise.
+ type: object
+ responses:
+ "200":
+ description: Enterprise
+ schema:
+ $ref: '#/definitions/Enterprise'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create enterprise with the given parameters.
+ tags:
+ - enterprises
+ /enterprises/{enterpriseID}:
+ delete:
+ operationId: DeleteEnterprise
+ parameters:
+ - description: ID of the enterprise to delete.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete enterprise by ID.
+ tags:
+ - enterprises
+ get:
+ operationId: GetEnterprise
+ parameters:
+ - description: The ID of the enterprise to fetch.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Enterprise
+ schema:
+ $ref: '#/definitions/Enterprise'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get enterprise by ID.
+ tags:
+ - enterprises
+ put:
+ operationId: UpdateEnterprise
+ parameters:
+ - description: The ID of the enterprise to update.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: Parameters used when updating the enterprise.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateEntityParams'
+ description: Parameters used when updating the enterprise.
+ type: object
+ responses:
+ "200":
+ description: Enterprise
+ schema:
+ $ref: '#/definitions/Enterprise'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update enterprise with the given parameters.
+ tags:
+ - enterprises
+ /enterprises/{enterpriseID}/instances:
+ get:
+ operationId: ListEnterpriseInstances
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List enterprise instances.
+ tags:
+ - enterprises
+ - instances
+ /enterprises/{enterpriseID}/pools:
+ get:
+ operationId: ListEnterprisePools
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pools
+ schema:
+ $ref: '#/definitions/Pools'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List enterprise pools.
+ tags:
+ - enterprises
+ - pools
+ post:
+ operationId: CreateEnterprisePool
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: Parameters used when creating the enterprise pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreatePoolParams'
+ description: Parameters used when creating the enterprise pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create enterprise pool with the parameters given.
+ tags:
+ - enterprises
+ - pools
+ /enterprises/{enterpriseID}/pools/{poolID}:
+ delete:
+ operationId: DeleteEnterprisePool
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: ID of the enterprise pool to delete.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete enterprise pool by ID.
+ tags:
+ - enterprises
+ - pools
+ get:
+ operationId: GetEnterprisePool
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: Pool ID.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get enterprise pool by ID.
+ tags:
+ - enterprises
+ - pools
+ put:
+ operationId: UpdateEnterprisePool
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: ID of the enterprise pool to update.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ - description: Parameters used when updating the enterprise pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdatePoolParams'
+ description: Parameters used when updating the enterprise pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update enterprise pool with the parameters given.
+ tags:
+ - enterprises
+ - pools
+ /enterprises/{enterpriseID}/scalesets:
+ get:
+ operationId: ListEnterpriseScaleSets
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List enterprise scale sets.
+ tags:
+ - enterprises
+ - scalesets
+ post:
+ operationId: CreateEnterpriseScaleSet
+ parameters:
+ - description: Enterprise ID.
+ in: path
+ name: enterpriseID
+ required: true
+ type: string
+ - description: Parameters used when creating the enterprise scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the enterprise scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create enterprise scale set with the parameters given.
+ tags:
+ - enterprises
+ - scalesets
+ /first-run:
+ post:
+ operationId: FirstRun
+ parameters:
+ - description: Create a new user.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/NewUserParams'
+ description: Create a new user.
+ type: object
+ responses:
+ "200":
+ description: User
+ schema:
+ $ref: '#/definitions/User'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Initialize the first run of the controller.
+ tags:
+ - first-run
+ /gitea/credentials:
+ get:
+ operationId: ListGiteaCredentials
+ responses:
+ "200":
+ description: Credentials
+ schema:
+ $ref: '#/definitions/Credentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all credentials.
+ tags:
+ - credentials
+ post:
+ operationId: CreateGiteaCredentials
+ parameters:
+ - description: Parameters used when creating a Gitea credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGiteaCredentialsParams'
+ description: Parameters used when creating a Gitea credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a Gitea credential.
+ tags:
+ - credentials
+ /gitea/credentials/{id}:
+ delete:
+ operationId: DeleteGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a Gitea credential.
+ tags:
+ - credentials
+ get:
+ operationId: GetGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a Gitea credential.
+ tags:
+ - credentials
+ put:
+ operationId: UpdateGiteaCredentials
+ parameters:
+ - description: ID of the Gitea credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ - description: Parameters used when updating a Gitea credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGiteaCredentialsParams'
+ description: Parameters used when updating a Gitea credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a Gitea credential.
+ tags:
+ - credentials
+ /gitea/endpoints:
+ get:
+ operationId: ListGiteaEndpoints
+ responses:
+ "200":
+ description: ForgeEndpoints
+ schema:
+ $ref: '#/definitions/ForgeEndpoints'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all Gitea Endpoints.
+ tags:
+ - endpoints
+ post:
+ operationId: CreateGiteaEndpoint
+ parameters:
+ - description: Parameters used when creating a Gitea endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGiteaEndpointParams'
+ description: Parameters used when creating a Gitea endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a Gitea Endpoint.
+ tags:
+ - endpoints
+ /gitea/endpoints/{name}:
+ delete:
+ operationId: DeleteGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a Gitea Endpoint.
+ tags:
+ - endpoints
+ get:
+ operationId: GetGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a Gitea Endpoint.
+ tags:
+ - endpoints
+ put:
+ operationId: UpdateGiteaEndpoint
+ parameters:
+ - description: The name of the Gitea endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: Parameters used when updating a Gitea endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGiteaEndpointParams'
+ description: Parameters used when updating a Gitea endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a Gitea Endpoint.
+ tags:
+ - endpoints
+ /github/credentials:
+ get:
+ operationId: ListCredentials
+ responses:
+ "200":
+ description: Credentials
+ schema:
+ $ref: '#/definitions/Credentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all credentials.
+ tags:
+ - credentials
+ post:
+ operationId: CreateCredentials
+ parameters:
+ - description: Parameters used when creating a GitHub credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGithubCredentialsParams'
+ description: Parameters used when creating a GitHub credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a GitHub credential.
+ tags:
+ - credentials
+ /github/credentials/{id}:
+ delete:
+ operationId: DeleteCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a GitHub credential.
+ tags:
+ - credentials
+ get:
+ operationId: GetCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a GitHub credential.
+ tags:
+ - credentials
+ put:
+ operationId: UpdateCredentials
+ parameters:
+ - description: ID of the GitHub credential.
+ in: path
+ name: id
+ required: true
+ type: integer
+ - description: Parameters used when updating a GitHub credential.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGithubCredentialsParams'
+ description: Parameters used when updating a GitHub credential.
+ type: object
+ responses:
+ "200":
+ description: ForgeCredentials
+ schema:
+ $ref: '#/definitions/ForgeCredentials'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a GitHub credential.
+ tags:
+ - credentials
+ /github/endpoints:
+ get:
+ operationId: ListGithubEndpoints
+ responses:
+ "200":
+ description: ForgeEndpoints
+ schema:
+ $ref: '#/definitions/ForgeEndpoints'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all GitHub Endpoints.
+ tags:
+ - endpoints
+ post:
+ operationId: CreateGithubEndpoint
+ parameters:
+ - description: Parameters used when creating a GitHub endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateGithubEndpointParams'
+ description: Parameters used when creating a GitHub endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create a GitHub Endpoint.
+ tags:
+ - endpoints
+ /github/endpoints/{name}:
+ delete:
+ operationId: DeleteGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete a GitHub Endpoint.
+ tags:
+ - endpoints
+ get:
+ operationId: GetGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get a GitHub Endpoint.
+ tags:
+ - endpoints
+ put:
+ operationId: UpdateGithubEndpoint
+ parameters:
+ - description: The name of the GitHub endpoint.
+ in: path
+ name: name
+ required: true
+ type: string
+ - description: Parameters used when updating a GitHub endpoint.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateGithubEndpointParams'
+ description: Parameters used when updating a GitHub endpoint.
+ type: object
+ responses:
+ "200":
+ description: ForgeEndpoint
+ schema:
+ $ref: '#/definitions/ForgeEndpoint'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update a GitHub Endpoint.
+ tags:
+ - endpoints
+ /instances:
+ get:
+ operationId: ListInstances
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get all runners' instances.
+ tags:
+ - instances
+ /instances/{instanceName}:
+ delete:
+ operationId: DeleteInstance
+ parameters:
+ - description: Runner instance name.
+ in: path
+ name: instanceName
+ required: true
+ type: string
+ - description: If true GARM will ignore any provider error when removing the runner and will continue to remove the runner from github and the GARM database.
+ in: query
+ name: forceRemove
+ type: boolean
+ - description: If true GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful if you want to clean up runners and your credentials have expired.
+ in: query
+ name: bypassGHUnauthorized
+ type: boolean
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete runner instance by name.
+ tags:
+ - instances
+ get:
+ operationId: GetInstance
+ parameters:
+ - description: Runner instance name.
+ in: path
+ name: instanceName
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instance
+ schema:
+ $ref: '#/definitions/Instance'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get runner instance by name.
+ tags:
+ - instances
+ /jobs:
+ get:
+ operationId: ListJobs
+ responses:
+ "200":
+ description: Jobs
+ schema:
+ $ref: '#/definitions/Jobs'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all jobs.
+ tags:
+ - jobs
+ /metrics-token:
+ get:
+ operationId: GetMetricsToken
+ responses:
+ "200":
+ description: JWTResponse
+ schema:
+ $ref: '#/definitions/JWTResponse'
+ "401":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Returns a JWT token that can be used to access the metrics endpoint.
+ tags:
+ - metrics-token
+ /organizations:
+ get:
+ operationId: ListOrgs
+ parameters:
+ - description: Exact organization name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
+ responses:
+ "200":
+ description: Organizations
+ schema:
+ $ref: '#/definitions/Organizations'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List organizations.
+ tags:
+ - organizations
+ post:
+ operationId: CreateOrg
+ parameters:
+ - description: Parameters used when creating the organization.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateOrgParams'
+ description: Parameters used when creating the organization.
+ type: object
+ responses:
+ "200":
+ description: Organization
+ schema:
+ $ref: '#/definitions/Organization'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create organization with the parameters given.
+ tags:
+ - organizations
+ /organizations/{orgID}:
+ delete:
+ operationId: DeleteOrg
+ parameters:
+ - description: ID of the organization to delete.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: If true and a webhook is installed for this organization, it will not be removed.
+ in: query
+ name: keepWebhook
+ type: boolean
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete organization by ID.
+ tags:
+ - organizations
+ get:
+ operationId: GetOrg
+ parameters:
+ - description: ID of the organization to fetch.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Organization
+ schema:
+ $ref: '#/definitions/Organization'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get organization by ID.
+ tags:
+ - organizations
+ put:
+ operationId: UpdateOrg
+ parameters:
+ - description: ID of the organization to update.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when updating the organization.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateEntityParams'
+ description: Parameters used when updating the organization.
+ type: object
+ responses:
+ "200":
+ description: Organization
+ schema:
+ $ref: '#/definitions/Organization'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update organization with the parameters given.
+ tags:
+ - organizations
+ /organizations/{orgID}/instances:
+ get:
+ operationId: ListOrgInstances
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List organization instances.
+ tags:
+ - organizations
+ - instances
+ /organizations/{orgID}/pools:
+ get:
+ operationId: ListOrgPools
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pools
+ schema:
+ $ref: '#/definitions/Pools'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List organization pools.
+ tags:
+ - organizations
+ - pools
+ post:
+ operationId: CreateOrgPool
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when creating the organization pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreatePoolParams'
+ description: Parameters used when creating the organization pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create organization pool with the parameters given.
+ tags:
+ - organizations
+ - pools
+ /organizations/{orgID}/pools/{poolID}:
+ delete:
+ operationId: DeleteOrgPool
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: ID of the organization pool to delete.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete organization pool by ID.
+ tags:
+ - organizations
+ - pools
+ get:
+ operationId: GetOrgPool
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Pool ID.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get organization pool by ID.
+ tags:
+ - organizations
+ - pools
+ put:
+ operationId: UpdateOrgPool
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: ID of the organization pool to update.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ - description: Parameters used when updating the organization pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdatePoolParams'
+ description: Parameters used when updating the organization pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update organization pool with the parameters given.
+ tags:
+ - organizations
+ - pools
+ /organizations/{orgID}/scalesets:
+ get:
+ operationId: ListOrgScaleSets
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List organization scale sets.
+ tags:
+ - organizations
+ - scalesets
+ post:
+ operationId: CreateOrgScaleSet
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when creating the organization scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the organization scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create organization scale set with the parameters given.
+ tags:
+ - organizations
+ - scalesets
+ /organizations/{orgID}/webhook:
+ delete:
+ operationId: UninstallOrgWebhook
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Uninstall organization webhook.
+ tags:
+ - organizations
+ - hooks
+ get:
+ operationId: GetOrgWebhookInfo
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get information about the GARM installed webhook on an organization.
+ tags:
+ - organizations
+ - hooks
+ post:
+ description: |-
+ Install the GARM webhook for an organization. The secret configured on the organization will
+ be used to validate the requests.
+ operationId: InstallOrgWebhook
+ parameters:
+ - description: Organization ID.
+ in: path
+ name: orgID
+ required: true
+ type: string
+ - description: Parameters used when creating the organization webhook.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/InstallWebhookParams'
+ description: Parameters used when creating the organization webhook.
+ type: object
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ tags:
+ - organizations
+ - hooks
+ /pools:
+ get:
+ operationId: ListPools
+ responses:
+ "200":
+ description: Pools
+ schema:
+ $ref: '#/definitions/Pools'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all pools.
+ tags:
+ - pools
+ /pools/{poolID}:
+ delete:
+ operationId: DeletePool
+ parameters:
+ - description: ID of the pool to delete.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete pool by ID.
+ tags:
+ - pools
+ get:
+ operationId: GetPool
+ parameters:
+ - description: ID of the pool to fetch.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get pool by ID.
+ tags:
+ - pools
+ put:
+ operationId: UpdatePool
+ parameters:
+ - description: ID of the pool to update.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ - description: Parameters to update the pool with.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdatePoolParams'
+ description: Parameters to update the pool with.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update pool by ID.
+ tags:
+ - pools
+ /pools/{poolID}/instances:
+ get:
+ operationId: ListPoolInstances
+ parameters:
+ - description: Runner pool ID.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List runner instances in a pool.
+ tags:
+ - instances
+ /providers:
+ get:
+ operationId: ListProviders
+ responses:
+ "200":
+ description: Providers
+ schema:
+ $ref: '#/definitions/Providers'
+ "400":
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all providers.
+ tags:
+ - providers
+ /repositories:
+ get:
+ operationId: ListRepos
+ parameters:
+ - description: Exact owner name to filter by
+ in: query
+ name: owner
+ type: string
+ - description: Exact repository name to filter by
+ in: query
+ name: name
+ type: string
+ - description: Exact endpoint name to filter by
+ in: query
+ name: endpoint
+ type: string
+ responses:
+ "200":
+ description: Repositories
+ schema:
+ $ref: '#/definitions/Repositories'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List repositories.
+ tags:
+ - repositories
+ post:
+ operationId: CreateRepo
+ parameters:
+ - description: Parameters used when creating the repository.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateRepoParams'
+ description: Parameters used when creating the repository.
+ type: object
+ responses:
+ "200":
+ description: Repository
+ schema:
+ $ref: '#/definitions/Repository'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create repository with the parameters given.
+ tags:
+ - repositories
+ /repositories/{repoID}:
+ delete:
+ operationId: DeleteRepo
+ parameters:
+ - description: ID of the repository to delete.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: If true and a webhook is installed for this repo, it will not be removed.
+ in: query
+ name: keepWebhook
+ type: boolean
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete repository by ID.
+ tags:
+ - repositories
+ get:
+ operationId: GetRepo
+ parameters:
+ - description: ID of the repository to fetch.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Repository
+ schema:
+ $ref: '#/definitions/Repository'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get repository by ID.
+ tags:
+ - repositories
+ put:
+ operationId: UpdateRepo
+ parameters:
+ - description: ID of the repository to update.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when updating the repository.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateEntityParams'
+ description: Parameters used when updating the repository.
+ type: object
+ responses:
+ "200":
+ description: Repository
+ schema:
+ $ref: '#/definitions/Repository'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update repository with the parameters given.
+ tags:
+ - repositories
+ /repositories/{repoID}/instances:
+ get:
+ operationId: ListRepoInstances
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List repository instances.
+ tags:
+ - repositories
+ - instances
+ /repositories/{repoID}/pools:
+ get:
+ operationId: ListRepoPools
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pools
+ schema:
+ $ref: '#/definitions/Pools'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List repository pools.
+ tags:
+ - repositories
+ - pools
+ post:
+ operationId: CreateRepoPool
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when creating the repository pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreatePoolParams'
+ description: Parameters used when creating the repository pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create repository pool with the parameters given.
+ tags:
+ - repositories
+ - pools
+ /repositories/{repoID}/pools/{poolID}:
+ delete:
+ operationId: DeleteRepoPool
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: ID of the repository pool to delete.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete repository pool by ID.
+ tags:
+ - repositories
+ - pools
+ get:
+ operationId: GetRepoPool
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Pool ID.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get repository pool by ID.
+ tags:
+ - repositories
+ - pools
+ put:
+ operationId: UpdateRepoPool
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: ID of the repository pool to update.
+ in: path
+ name: poolID
+ required: true
+ type: string
+ - description: Parameters used when updating the repository pool.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdatePoolParams'
+ description: Parameters used when updating the repository pool.
+ type: object
+ responses:
+ "200":
+ description: Pool
+ schema:
+ $ref: '#/definitions/Pool'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update repository pool with the parameters given.
+ tags:
+ - repositories
+ - pools
+ /repositories/{repoID}/scalesets:
+ get:
+ operationId: ListRepoScaleSets
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List repository scale sets.
+ tags:
+ - repositories
+ - scalesets
+ post:
+ operationId: CreateRepoScaleSet
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when creating the repository scale set.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/CreateScaleSetParams'
+ description: Parameters used when creating the repository scale set.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Create repository scale set with the parameters given.
+ tags:
+ - repositories
+ - scalesets
+ /repositories/{repoID}/webhook:
+ delete:
+ operationId: UninstallRepoWebhook
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Uninstall repository webhook.
+ tags:
+ - repositories
+ - hooks
+ get:
+ operationId: GetRepoWebhookInfo
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get information about the GARM installed webhook on a repository.
+ tags:
+ - repositories
+ - hooks
+ post:
+ description: |-
+ Install the GARM webhook for a repository. The secret configured on the repository will
+ be used to validate the requests.
+ operationId: InstallRepoWebhook
+ parameters:
+ - description: Repository ID.
+ in: path
+ name: repoID
+ required: true
+ type: string
+ - description: Parameters used when creating the repository webhook.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/InstallWebhookParams'
+ description: Parameters used when creating the repository webhook.
+ type: object
+ responses:
+ "200":
+ description: HookInfo
+ schema:
+ $ref: '#/definitions/HookInfo'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ tags:
+ - repositories
+ - hooks
+ /scalesets:
+ get:
+ operationId: ListScalesets
+ responses:
+ "200":
+ description: ScaleSets
+ schema:
+ $ref: '#/definitions/ScaleSets'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List all scalesets.
+ tags:
+ - scalesets
+ /scalesets/{scalesetID}:
+ delete:
+ operationId: DeleteScaleSet
+ parameters:
+ - description: ID of the scale set to delete.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Delete scale set by ID.
+ tags:
+ - scalesets
+ get:
+ operationId: GetScaleSet
+ parameters:
+ - description: ID of the scale set to fetch.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Get scale set by ID.
+ tags:
+ - scalesets
+ put:
+ operationId: UpdateScaleSet
+ parameters:
+ - description: ID of the scale set to update.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ - description: Parameters to update the scale set with.
+ in: body
+ name: Body
+ required: true
+ schema:
+ $ref: '#/definitions/UpdateScaleSetParams'
+ description: Parameters to update the scale set with.
+ type: object
+ responses:
+ "200":
+ description: ScaleSet
+ schema:
+ $ref: '#/definitions/ScaleSet'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: Update scale set by ID.
+ tags:
+ - scalesets
+ /scalesets/{scalesetID}/instances:
+ get:
+ operationId: ListScaleSetInstances
+ parameters:
+ - description: Runner scale set ID.
+ in: path
+ name: scalesetID
+ required: true
+ type: string
+ responses:
+ "200":
+ description: Instances
+ schema:
+ $ref: '#/definitions/Instances'
+ default:
+ description: APIErrorResponse
+ schema:
+ $ref: '#/definitions/APIErrorResponse'
+ summary: List runner instances in a scale set.
+ tags:
+ - instances
+produces:
+ - application/json
+security:
+ - Bearer: []
+securityDefinitions:
+ Bearer:
+ description: 'The token with the `Bearer: ` prefix, e.g. "Bearer abcde12345".'
+ in: header
+ name: Authorization
+ type: apiKey
+swagger: "2.0"
diff --git a/auth/admin_required.go b/auth/admin_required.go
new file mode 100644
index 00000000..b3ca3624
--- /dev/null
+++ b/auth/admin_required.go
@@ -0,0 +1,27 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package auth
+
+import "net/http"
+
+func AdminRequiredMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ if !IsAdmin(ctx) {
+ http.Error(w, "Unauthorized", http.StatusUnauthorized)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/auth/auth.go b/auth/auth.go
index a442da70..c5fa1ebd 100644
--- a/auth/auth.go
+++ b/auth/auth.go
@@ -16,18 +16,19 @@ package auth
import (
"context"
+ "errors"
+ "fmt"
"time"
+ jwt "github.com/golang-jwt/jwt/v5"
+ "github.com/nbutton23/zxcvbn-go"
+ "golang.org/x/crypto/bcrypt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/golang-jwt/jwt"
- "github.com/nbutton23/zxcvbn-go"
- "github.com/pkg/errors"
- "golang.org/x/crypto/bcrypt"
)
func NewAuthenticator(cfg config.JWTAuth, store common.Store) *Authenticator {
@@ -49,24 +50,30 @@ func (a *Authenticator) IsInitialized() bool {
func (a *Authenticator) GetJWTToken(ctx context.Context) (string, error) {
tokenID, err := util.GetRandomString(16)
if err != nil {
- return "", errors.Wrap(err, "generating random string")
+ return "", fmt.Errorf("error generating random string: %w", err)
}
- expireToken := time.Now().Add(a.cfg.TimeToLive.Duration()).Unix()
+ expireToken := time.Now().Add(a.cfg.TimeToLive.Duration())
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
+ generation := PasswordGeneration(ctx)
claims := JWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
+ // nolint:golangci-lint,godox
// TODO: make this configurable
Issuer: "garm",
},
- UserID: UserID(ctx),
- TokenID: tokenID,
- IsAdmin: IsAdmin(ctx),
- FullName: FullName(ctx),
+ UserID: UserID(ctx),
+ TokenID: tokenID,
+ IsAdmin: IsAdmin(ctx),
+ FullName: FullName(ctx),
+ Generation: generation,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString([]byte(a.cfg.Secret))
if err != nil {
- return "", errors.Wrap(err, "fetching token string")
+ return "", fmt.Errorf("error fetching token string: %w", err)
}
return tokenString, nil
@@ -75,22 +82,26 @@ func (a *Authenticator) GetJWTToken(ctx context.Context) (string, error) {
// GetJWTMetricsToken returns a JWT token that can be used to read metrics.
// This token is not tied to a user, no user is stored in the db.
func (a *Authenticator) GetJWTMetricsToken(ctx context.Context) (string, error) {
-
if !IsAdmin(ctx) {
return "", runnerErrors.ErrUnauthorized
}
tokenID, err := util.GetRandomString(16)
if err != nil {
- return "", errors.Wrap(err, "generating random string")
+ return "", fmt.Errorf("error generating random string: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: currently this is the same TTL as the normal Token
// maybe we should make this configurable
// it's usually pretty nasty if the monitoring fails because the token expired
- expireToken := time.Now().Add(a.cfg.TimeToLive.Duration()).Unix()
+ expireToken := time.Now().Add(a.cfg.TimeToLive.Duration())
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
claims := JWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
+ // nolint:golangci-lint,godox
// TODO: make this configurable
Issuer: "garm",
},
@@ -101,7 +112,7 @@ func (a *Authenticator) GetJWTMetricsToken(ctx context.Context) (string, error)
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString([]byte(a.cfg.Secret))
if err != nil {
- return "", errors.Wrap(err, "fetching token string")
+ return "", fmt.Errorf("error fetching token string: %w", err)
}
return tokenString, nil
@@ -111,7 +122,7 @@ func (a *Authenticator) InitController(ctx context.Context, param params.NewUser
_, err := a.store.ControllerInfo()
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, errors.Wrap(err, "initializing controller")
+ return params.User{}, fmt.Errorf("error initializing controller: %w", err)
}
}
if a.store.HasAdminUser(ctx) {
@@ -141,7 +152,7 @@ func (a *Authenticator) InitController(ctx context.Context, param params.NewUser
hashed, err := util.PaswsordToBcrypt(param.Password)
if err != nil {
- return params.User{}, errors.Wrap(err, "creating user")
+ return params.User{}, fmt.Errorf("error creating user: %w", err)
}
param.Password = hashed
@@ -159,7 +170,7 @@ func (a *Authenticator) AuthenticateUser(ctx context.Context, info params.Passwo
if errors.Is(err, runnerErrors.ErrNotFound) {
return ctx, runnerErrors.ErrUnauthorized
}
- return ctx, errors.Wrap(err, "authenticating")
+ return ctx, fmt.Errorf("error authenticating: %w", err)
}
if !user.Enabled {
@@ -174,5 +185,5 @@ func (a *Authenticator) AuthenticateUser(ctx context.Context, info params.Passwo
return ctx, runnerErrors.ErrUnauthorized
}
- return PopulateContext(ctx, user), nil
+ return PopulateContext(ctx, user, nil), nil
}
diff --git a/auth/context.go b/auth/context.go
index 694fe26e..1b648bb6 100644
--- a/auth/context.go
+++ b/auth/context.go
@@ -16,9 +16,10 @@ package auth
import (
"context"
+ "time"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/providers/common"
)
type contextFlags string
@@ -28,9 +29,11 @@ const (
fullNameKey contextFlags = "full_name"
readMetricsKey contextFlags = "read_metrics"
// UserIDFlag is the User ID flag we set in the context
- UserIDFlag contextFlags = "user_id"
- isEnabledFlag contextFlags = "is_enabled"
- jwtTokenFlag contextFlags = "jwt_token"
+ UserIDFlag contextFlags = "user_id"
+ isEnabledFlag contextFlags = "is_enabled"
+ jwtTokenFlag contextFlags = "jwt_token"
+ authExpiresFlag contextFlags = "auth_expires"
+ passwordGenerationFlag contextFlags = "password_generation"
instanceIDKey contextFlags = "id"
instanceNameKey contextFlags = "name"
@@ -39,8 +42,23 @@ const (
instanceEntityKey contextFlags = "entity"
instanceRunnerStatus contextFlags = "status"
instanceTokenFetched contextFlags = "tokenFetched"
+ instanceHasJITConfig contextFlags = "hasJITConfig"
+ instanceParams contextFlags = "instanceParams"
+ instanceForgeTypeKey contextFlags = "forge_type"
)
+func SetInstanceForgeType(ctx context.Context, val string) context.Context {
+ return context.WithValue(ctx, instanceForgeTypeKey, val)
+}
+
+func InstanceForgeType(ctx context.Context) params.EndpointType {
+ elem := ctx.Value(instanceForgeTypeKey)
+ if elem == nil {
+ return ""
+ }
+ return elem.(params.EndpointType)
+}
+
func SetInstanceID(ctx context.Context, id string) context.Context {
return context.WithValue(ctx, instanceIDKey, id)
}
@@ -65,16 +83,45 @@ func InstanceTokenFetched(ctx context.Context) bool {
return elem.(bool)
}
-func SetInstanceRunnerStatus(ctx context.Context, val common.RunnerStatus) context.Context {
+func SetInstanceHasJITConfig(ctx context.Context, cfg map[string]string) context.Context {
+ return context.WithValue(ctx, instanceHasJITConfig, len(cfg) > 0)
+}
+
+func InstanceHasJITConfig(ctx context.Context) bool {
+ elem := ctx.Value(instanceHasJITConfig)
+ if elem == nil {
+ return false
+ }
+ return elem.(bool)
+}
+
+func SetInstanceParams(ctx context.Context, instance params.Instance) context.Context {
+ return context.WithValue(ctx, instanceParams, instance)
+}
+
+func InstanceParams(ctx context.Context) (params.Instance, error) {
+ elem := ctx.Value(instanceParams)
+ if elem == nil {
+ return params.Instance{}, runnerErrors.ErrNotFound
+ }
+
+ instanceParams, ok := elem.(params.Instance)
+ if !ok {
+ return params.Instance{}, runnerErrors.ErrNotFound
+ }
+ return instanceParams, nil
+}
+
+func SetInstanceRunnerStatus(ctx context.Context, val params.RunnerStatus) context.Context {
return context.WithValue(ctx, instanceRunnerStatus, val)
}
-func InstanceRunnerStatus(ctx context.Context) common.RunnerStatus {
+func InstanceRunnerStatus(ctx context.Context) params.RunnerStatus {
elem := ctx.Value(instanceRunnerStatus)
if elem == nil {
- return common.RunnerPending
+ return params.RunnerPending
}
- return elem.(common.RunnerStatus)
+ return elem.(params.RunnerStatus)
}
func SetInstanceName(ctx context.Context, val string) context.Context {
@@ -125,25 +172,57 @@ func InstanceEntity(ctx context.Context) string {
return elem.(string)
}
-func PopulateInstanceContext(ctx context.Context, instance params.Instance) context.Context {
+func PopulateInstanceContext(ctx context.Context, instance params.Instance, claims *InstanceJWTClaims) context.Context {
ctx = SetInstanceID(ctx, instance.ID)
ctx = SetInstanceName(ctx, instance.Name)
ctx = SetInstancePoolID(ctx, instance.PoolID)
ctx = SetInstanceRunnerStatus(ctx, instance.RunnerStatus)
ctx = SetInstanceTokenFetched(ctx, instance.TokenFetched)
+ ctx = SetInstanceHasJITConfig(ctx, instance.JitConfiguration)
+ ctx = SetInstanceParams(ctx, instance)
+ ctx = SetInstanceForgeType(ctx, claims.ForgeType)
return ctx
}
// PopulateContext sets the appropriate fields in the context, based on
// the user object
-func PopulateContext(ctx context.Context, user params.User) context.Context {
+func PopulateContext(ctx context.Context, user params.User, authExpires *time.Time) context.Context {
ctx = SetUserID(ctx, user.ID)
ctx = SetAdmin(ctx, user.IsAdmin)
ctx = SetIsEnabled(ctx, user.Enabled)
ctx = SetFullName(ctx, user.FullName)
+ ctx = SetExpires(ctx, authExpires)
+ ctx = SetPasswordGeneration(ctx, user.Generation)
return ctx
}
+func SetExpires(ctx context.Context, expires *time.Time) context.Context {
+ if expires == nil {
+ return ctx
+ }
+ return context.WithValue(ctx, authExpiresFlag, expires)
+}
+
+func Expires(ctx context.Context) *time.Time {
+ elem := ctx.Value(authExpiresFlag)
+ if elem == nil {
+ return nil
+ }
+ return elem.(*time.Time)
+}
+
+func SetPasswordGeneration(ctx context.Context, val uint) context.Context {
+ return context.WithValue(ctx, passwordGenerationFlag, val)
+}
+
+func PasswordGeneration(ctx context.Context) uint {
+ elem := ctx.Value(passwordGenerationFlag)
+ if elem == nil {
+ return 0
+ }
+ return elem.(uint)
+}
+
// SetFullName sets the user full name in the context
func SetFullName(ctx context.Context, fullName string) context.Context {
return context.WithValue(ctx, fullNameKey, fullName)
@@ -205,8 +284,10 @@ func UserID(ctx context.Context) string {
// GetAdminContext will return an admin context. This can be used internally
// when fetching users.
-func GetAdminContext() context.Context {
- ctx := context.Background()
+func GetAdminContext(ctx context.Context) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
ctx = SetUserID(ctx, "")
ctx = SetAdmin(ctx, true)
ctx = SetIsEnabled(ctx, true)
diff --git a/auth/init_required.go b/auth/init_required.go
index 6265649e..3ef31d70 100644
--- a/auth/init_required.go
+++ b/auth/init_required.go
@@ -16,7 +16,7 @@ package auth
import (
"encoding/json"
- "log"
+ "log/slog"
"net/http"
"github.com/cloudbase/garm/apiserver/params"
@@ -37,16 +37,44 @@ type initRequired struct {
// Middleware implements the middleware interface
func (i *initRequired) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctrlInfo, err := i.store.ControllerInfo()
- if err != nil || ctrlInfo.ControllerID.String() == "" {
+ ctx := r.Context()
+
+ if !i.store.HasAdminUser(ctx) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusConflict)
if err := json.NewEncoder(w).Encode(params.InitializationRequired); err != nil {
- log.Printf("failed to encode response: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
return
}
- ctx := r.Context()
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func NewUrlsRequiredMiddleware(store common.Store) (Middleware, error) {
+ return &urlsRequired{
+ store: store,
+ }, nil
+}
+
+type urlsRequired struct {
+ store common.Store
+}
+
+func (u *urlsRequired) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ ctrlInfo, err := u.store.ControllerInfo()
+ if err != nil || ctrlInfo.MetadataURL == "" || ctrlInfo.CallbackURL == "" {
+ w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(http.StatusConflict)
+ if err := json.NewEncoder(w).Encode(params.URLsRequired); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
+ }
+ return
+ }
+
next.ServeHTTP(w, r.WithContext(ctx))
})
}
diff --git a/auth/instance_middleware.go b/auth/instance_middleware.go
index 3af99d6f..6d1d66e4 100644
--- a/auth/instance_middleware.go
+++ b/auth/instance_middleware.go
@@ -17,19 +17,20 @@ package auth
import (
"context"
"fmt"
+ "log/slog"
+ "math"
"net/http"
"strings"
"time"
+ jwt "github.com/golang-jwt/jwt/v5"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/config"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
- providerCommon "github.com/cloudbase/garm/runner/providers/common"
-
- "github.com/golang-jwt/jwt"
- "github.com/pkg/errors"
)
// InstanceJWTClaims holds JWT claims
@@ -38,32 +39,58 @@ type InstanceJWTClaims struct {
Name string `json:"name"`
PoolID string `json:"provider_id"`
// Scope is either repository or organization
- Scope params.PoolType `json:"scope"`
+ Scope params.ForgeEntityType `json:"scope"`
// Entity is the repo or org name
- Entity string `json:"entity"`
- jwt.StandardClaims
+ Entity string `json:"entity"`
+ CreateAttempt int `json:"create_attempt"`
+ ForgeType string `json:"forge_type"`
+ jwt.RegisteredClaims
}
-func NewInstanceJWTToken(instance params.Instance, secret, entity string, poolType params.PoolType, ttlMinutes uint) (string, error) {
+func NewInstanceTokenGetter(jwtSecret string) (InstanceTokenGetter, error) {
+ if jwtSecret == "" {
+ return nil, fmt.Errorf("jwt secret is required")
+ }
+ return &instanceToken{
+ jwtSecret: jwtSecret,
+ }, nil
+}
+
+type instanceToken struct {
+ jwtSecret string
+}
+
+func (i *instanceToken) NewInstanceJWTToken(instance params.Instance, entity params.ForgeEntity, entityType params.ForgeEntityType, ttlMinutes uint) (string, error) {
// Token expiration is equal to the bootstrap timeout set on the pool plus the polling
// interval garm uses to check for timed out runners. Runners that have not sent their info
// by the end of this interval are most likely failed and will be reaped by garm anyway.
- expireToken := time.Now().Add(time.Duration(ttlMinutes)*time.Minute + common.PoolReapTimeoutInterval).Unix()
+ var ttl int
+ if ttlMinutes > math.MaxInt {
+ ttl = math.MaxInt
+ } else {
+ ttl = int(ttlMinutes)
+ }
+ expireToken := time.Now().Add(time.Duration(ttl)*time.Minute + common.PoolReapTimeoutInterval)
+ expires := &jwt.NumericDate{
+ Time: expireToken,
+ }
claims := InstanceJWTClaims{
- StandardClaims: jwt.StandardClaims{
- ExpiresAt: expireToken,
+ RegisteredClaims: jwt.RegisteredClaims{
+ ExpiresAt: expires,
Issuer: "garm",
},
- ID: instance.ID,
- Name: instance.Name,
- PoolID: instance.PoolID,
- Scope: poolType,
- Entity: entity,
+ ID: instance.ID,
+ Name: instance.Name,
+ PoolID: instance.PoolID,
+ Scope: entityType,
+ Entity: entity.String(),
+ ForgeType: string(entity.Credentials.ForgeType),
+ CreateAttempt: instance.CreateAttempt,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
- tokenString, err := token.SignedString([]byte(secret))
+ tokenString, err := token.SignedString([]byte(i.jwtSecret))
if err != nil {
- return "", errors.Wrap(err, "signing token")
+ return "", fmt.Errorf("error signing token: %w", err)
}
return tokenString, nil
@@ -93,29 +120,30 @@ func (amw *instanceMiddleware) claimsToContext(ctx context.Context, claims *Inst
return nil, runnerErrors.ErrUnauthorized
}
- instanceInfo, err := amw.store.GetInstanceByName(ctx, claims.Name)
+ instanceInfo, err := amw.store.GetInstance(ctx, claims.Name)
if err != nil {
return ctx, runnerErrors.ErrUnauthorized
}
- ctx = PopulateInstanceContext(ctx, instanceInfo)
+ ctx = PopulateInstanceContext(ctx, instanceInfo, claims)
return ctx, nil
}
// Middleware implements the middleware interface
func (amw *instanceMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // nolint:golangci-lint,godox
// TODO: Log error details when authentication fails
ctx := r.Context()
authorizationHeader := r.Header.Get("authorization")
if authorizationHeader == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
bearerToken := strings.Split(authorizationHeader, " ")
if len(bearerToken) != 2 {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
@@ -126,32 +154,61 @@ func (amw *instanceMiddleware) Middleware(next http.Handler) http.Handler {
}
return []byte(amw.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
ctx, err = amw.claimsToContext(ctx, claims)
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if InstanceID(ctx) == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
runnerStatus := InstanceRunnerStatus(ctx)
- if runnerStatus != providerCommon.RunnerInstalling && runnerStatus != providerCommon.RunnerPending {
+ if runnerStatus != params.RunnerInstalling && runnerStatus != params.RunnerPending {
// Instances that have finished installing can no longer authenticate to the API
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ instanceParams, err := InstanceParams(ctx)
+ if err != nil {
+ slog.InfoContext(
+ ctx, "could not find instance params",
+ "runner_name", InstanceName(ctx))
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ // Token was generated for a previous attempt at creating this instance.
+ if claims.CreateAttempt != instanceParams.CreateAttempt {
+ slog.InfoContext(
+ ctx, "invalid token create attempt",
+ "runner_name", InstanceName(ctx),
+ "token_create_attempt", claims.CreateAttempt,
+ "instance_create_attempt", instanceParams.CreateAttempt)
+ invalidAuthResponse(ctx, w)
+ return
+ }
+
+ // Only allow instances that are in the creating or running state to authenticate.
+ if instanceParams.Status != commonParams.InstanceCreating && instanceParams.Status != commonParams.InstanceRunning {
+ slog.InfoContext(
+ ctx, "invalid instance status",
+ "runner_name", InstanceName(ctx),
+ "status", instanceParams.Status)
+ invalidAuthResponse(ctx, w)
return
}
diff --git a/auth/interfaces.go b/auth/interfaces.go
index fa5ca43c..ab68dbd7 100644
--- a/auth/interfaces.go
+++ b/auth/interfaces.go
@@ -14,9 +14,17 @@
package auth
-import "net/http"
+import (
+ "net/http"
+
+ "github.com/cloudbase/garm/params"
+)
// Middleware defines an authentication middleware
type Middleware interface {
Middleware(next http.Handler) http.Handler
}
+
+type InstanceTokenGetter interface {
+ NewInstanceJWTToken(instance params.Instance, entity params.ForgeEntity, poolType params.ForgeEntityType, ttlMinutes uint) (string, error)
+}
diff --git a/auth/jwt.go b/auth/jwt.go
index 14dd857d..52fce0c9 100644
--- a/auth/jwt.go
+++ b/auth/jwt.go
@@ -18,16 +18,17 @@ import (
"context"
"encoding/json"
"fmt"
- "log"
+ "log/slog"
"net/http"
"strings"
+ "time"
+ jwt "github.com/golang-jwt/jwt/v5"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
apiParams "github.com/cloudbase/garm/apiserver/params"
"github.com/cloudbase/garm/config"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
-
- "github.com/golang-jwt/jwt"
)
// JWTClaims holds JWT claims
@@ -37,7 +38,8 @@ type JWTClaims struct {
FullName string `json:"full_name"`
IsAdmin bool `json:"is_admin"`
ReadMetrics bool `json:"read_metrics"`
- jwt.StandardClaims
+ Generation uint `json:"generation"`
+ jwt.RegisteredClaims
}
// jwtMiddleware is the authentication middleware
@@ -69,63 +71,85 @@ func (amw *jwtMiddleware) claimsToContext(ctx context.Context, claims *JWTClaims
return ctx, runnerErrors.ErrUnauthorized
}
- ctx = PopulateContext(ctx, userInfo)
+ var expiresAt *time.Time
+ if claims.ExpiresAt != nil {
+ expires := claims.ExpiresAt.Time.UTC()
+ expiresAt = &expires
+ }
+
+ if userInfo.Generation != claims.Generation {
+ // Password was reset since token was issued. Invalidate.
+ return ctx, runnerErrors.ErrUnauthorized
+ }
+
+ ctx = PopulateContext(ctx, userInfo, expiresAt)
return ctx, nil
}
-func invalidAuthResponse(w http.ResponseWriter) {
- w.WriteHeader(http.StatusUnauthorized)
+func invalidAuthResponse(ctx context.Context, w http.ResponseWriter) {
w.Header().Add("Content-Type", "application/json")
+ w.WriteHeader(http.StatusUnauthorized)
if err := json.NewEncoder(w).Encode(
apiParams.APIErrorResponse{
Error: "Authentication failed",
}); err != nil {
- log.Printf("failed to encode response: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to encode response")
}
}
+func (amw *jwtMiddleware) getTokenFromRequest(r *http.Request) (string, error) {
+ authorizationHeader := r.Header.Get("authorization")
+ if authorizationHeader == "" {
+ cookie, err := r.Cookie("garm_token")
+ if err != nil {
+ return "", fmt.Errorf("failed to get cookie: %w", err)
+ }
+ return cookie.Value, nil
+ }
+
+ bearerToken := strings.Split(authorizationHeader, " ")
+ if len(bearerToken) != 2 {
+ return "", fmt.Errorf("invalid auth header")
+ }
+ return bearerToken[1], nil
+}
+
// Middleware implements the middleware interface
func (amw *jwtMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // nolint:golangci-lint,godox
// TODO: Log error details when authentication fails
ctx := r.Context()
- authorizationHeader := r.Header.Get("authorization")
- if authorizationHeader == "" {
- invalidAuthResponse(w)
+ authToken, err := amw.getTokenFromRequest(r)
+ if err != nil {
+ slog.ErrorContext(ctx, "failed to get auth token", "error", err)
+ invalidAuthResponse(ctx, w)
return
}
-
- bearerToken := strings.Split(authorizationHeader, " ")
- if len(bearerToken) != 2 {
- invalidAuthResponse(w)
- return
- }
-
claims := &JWTClaims{}
- token, err := jwt.ParseWithClaims(bearerToken[1], claims, func(token *jwt.Token) (interface{}, error) {
+ token, err := jwt.ParseWithClaims(authToken, claims, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("invalid signing method")
}
return []byte(amw.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
ctx, err = amw.claimsToContext(ctx, claims)
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !IsEnabled(ctx) {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
diff --git a/auth/metrics.go b/auth/metrics.go
index 11c25072..5ea688e2 100644
--- a/auth/metrics.go
+++ b/auth/metrics.go
@@ -1,3 +1,16 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package auth
import (
@@ -6,9 +19,9 @@ import (
"net/http"
"strings"
- "github.com/cloudbase/garm/config"
+ jwt "github.com/golang-jwt/jwt/v5"
- "github.com/golang-jwt/jwt"
+ "github.com/cloudbase/garm/config"
)
type MetricsMiddleware struct {
@@ -23,17 +36,16 @@ func NewMetricsMiddleware(cfg config.JWTAuth) (*MetricsMiddleware, error) {
func (m *MetricsMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
ctx := r.Context()
authorizationHeader := r.Header.Get("authorization")
if authorizationHeader == "" {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
bearerToken := strings.Split(authorizationHeader, " ")
if len(bearerToken) != 2 {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
@@ -44,20 +56,19 @@ func (m *MetricsMiddleware) Middleware(next http.Handler) http.Handler {
}
return []byte(m.cfg.Secret), nil
})
-
if err != nil {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
if !token.Valid {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
// we fully trust the claims
if !claims.ReadMetrics {
- invalidAuthResponse(w)
+ invalidAuthResponse(ctx, w)
return
}
diff --git a/build-webapp.sh b/build-webapp.sh
new file mode 100755
index 00000000..01b13c04
--- /dev/null
+++ b/build-webapp.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+echo "Building GARM SPA (SvelteKit)..."
+
+# Navigate to webapp directory
+cd webapp
+
+# Install dependencies if node_modules doesn't exist
+npm install
+
+# Build the SPA
+echo "Building SPA..."
+npm run build
+echo "SPA built successfully!"
diff --git a/cache/cache_test.go b/cache/cache_test.go
new file mode 100644
index 00000000..7a8ebed3
--- /dev/null
+++ b/cache/cache_test.go
@@ -0,0 +1,1040 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type CacheTestSuite struct {
+ suite.Suite
+ entity params.ForgeEntity
+}
+
+func (c *CacheTestSuite) SetupTest() {
+ c.entity = params.ForgeEntity{
+ ID: "1234",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: params.ForgeCredentials{
+ ID: 1,
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+}
+
+func (c *CacheTestSuite) TearDownTest() {
+ // Clean up the cache after each test
+ githubToolsCache.mux.Lock()
+ defer githubToolsCache.mux.Unlock()
+ githubToolsCache.entities = make(map[string]GithubEntityTools)
+ giteaCredentialsCache.cache = make(map[uint]params.ForgeCredentials)
+ credentialsCache.cache = make(map[uint]params.ForgeCredentials)
+ instanceCache.cache = make(map[string]params.Instance)
+ entityCache = &EntityCache{
+ entities: make(map[string]EntityItem),
+ }
+}
+
+func (c *CacheTestSuite) TestCacheIsInitialized() {
+ c.Require().NotNil(githubToolsCache)
+ c.Require().NotNil(credentialsCache)
+ c.Require().NotNil(instanceCache)
+ c.Require().NotNil(entityCache)
+}
+
+func (c *CacheTestSuite) TestSetToolsCacheWorks() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ c.Require().Len(githubToolsCache.entities, 1)
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().NoError(err)
+ c.Require().Len(cachedTools, 1)
+ c.Require().Equal(tools[0].GetDownloadURL(), cachedTools[0].GetDownloadURL())
+}
+
+func (c *CacheTestSuite) TestSetToolsCacheWithError() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ entity := githubToolsCache.entities[c.entity.ID]
+
+ c.Require().Equal(int64(entity.expiresAt.Sub(entity.updatedAt).Minutes()), int64(60))
+ c.Require().Len(githubToolsCache.entities, 1)
+ SetGithubToolsCacheError(c.entity, runnerErrors.ErrNotFound)
+
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestSetErrorOnNonExistingCacheEntity() {
+ entity := params.ForgeEntity{
+ ID: "non-existing-entity",
+ }
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCacheError(entity, runnerErrors.ErrNotFound)
+
+ storedEntity, err := GetGithubToolsCache(entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(storedEntity)
+}
+
+func (c *CacheTestSuite) TestTimedOutToolsCache() {
+ tools := []commonParams.RunnerApplicationDownload{
+ {
+ DownloadURL: garmTesting.Ptr("https://example.com"),
+ },
+ }
+
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ SetGithubToolsCache(c.entity, tools)
+ entity := githubToolsCache.entities[c.entity.ID]
+
+ c.Require().Equal(int64(entity.expiresAt.Sub(entity.updatedAt).Minutes()), int64(60))
+ c.Require().Len(githubToolsCache.entities, 1)
+ entity = githubToolsCache.entities[c.entity.ID]
+ entity.updatedAt = entity.updatedAt.Add(-3 * time.Hour)
+ entity.expiresAt = entity.updatedAt.Add(-2 * time.Hour)
+ githubToolsCache.entities[c.entity.ID] = entity
+
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestGetInexistentCache() {
+ c.Require().NotNil(githubToolsCache)
+ c.Require().Len(githubToolsCache.entities, 0)
+ cachedTools, err := GetGithubToolsCache(c.entity.ID)
+ c.Require().Error(err)
+ c.Require().Nil(cachedTools)
+}
+
+func (c *CacheTestSuite) TestSetGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+}
+
+func (c *CacheTestSuite) TestGetGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ nonExisting, ok := GetGithubCredentials(2)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, nonExisting)
+}
+
+func (c *CacheTestSuite) TestDeleteGithubCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGithubCredentials(credentials)
+ cachedCreds, ok := GetGithubCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGithubCredentials(1)
+ cachedCreds, ok = GetGithubCredentials(1)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, cachedCreds)
+}
+
+func (c *CacheTestSuite) TestGetAllGithubCredentials() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGithubCredentials(credentials1)
+ SetGithubCredentials(credentials2)
+
+ cachedCreds := GetAllGithubCredentials()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1)
+ c.Require().Contains(cachedCreds, credentials2)
+}
+
+func (c *CacheTestSuite) TestSetInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+}
+
+func (c *CacheTestSuite) TestGetInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+
+ nonExisting, ok := GetInstanceCache("non-existing")
+ c.Require().False(ok)
+ c.Require().Equal(params.Instance{}, nonExisting)
+}
+
+func (c *CacheTestSuite) TestDeleteInstanceCache() {
+ instance := params.Instance{
+ Name: "test-instance",
+ }
+ SetInstanceCache(instance)
+ cachedInstance, ok := GetInstanceCache("test-instance")
+ c.Require().True(ok)
+ c.Require().Equal(instance.Name, cachedInstance.Name)
+
+ DeleteInstanceCache("test-instance")
+ cachedInstance, ok = GetInstanceCache("test-instance")
+ c.Require().False(ok)
+ c.Require().Equal(params.Instance{}, cachedInstance)
+}
+
+func (c *CacheTestSuite) TestGetAllInstances() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+
+ cachedInstances := GetAllInstancesCache()
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+}
+
+func (c *CacheTestSuite) TestGetInstancesForPool() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ PoolID: "pool-1",
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ PoolID: "pool-1",
+ }
+ instance3 := params.Instance{
+ Name: "test-instance-3",
+ PoolID: "pool-2",
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+ SetInstanceCache(instance3)
+
+ cachedInstances := GetInstancesForPool("pool-1")
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+
+ cachedInstances = GetInstancesForPool("pool-2")
+ c.Require().Len(cachedInstances, 1)
+ c.Require().Contains(cachedInstances, instance3)
+}
+
+func (c *CacheTestSuite) TestGetInstancesForScaleSet() {
+ instance1 := params.Instance{
+ Name: "test-instance-1",
+ ScaleSetID: 1,
+ }
+ instance2 := params.Instance{
+ Name: "test-instance-2",
+ ScaleSetID: 1,
+ }
+ instance3 := params.Instance{
+ Name: "test-instance-3",
+ ScaleSetID: 2,
+ }
+ SetInstanceCache(instance1)
+ SetInstanceCache(instance2)
+ SetInstanceCache(instance3)
+
+ cachedInstances := GetInstancesForScaleSet(1)
+ c.Require().Len(cachedInstances, 2)
+ c.Require().Contains(cachedInstances, instance1)
+ c.Require().Contains(cachedInstances, instance2)
+
+ cachedInstances = GetInstancesForScaleSet(2)
+ c.Require().Len(cachedInstances, 1)
+ c.Require().Contains(cachedInstances, instance3)
+}
+
+func (c *CacheTestSuite) TestSetGetEntityCache() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ SetEntity(entity)
+ cachedEntity, ok := GetEntity("test-entity")
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+ SetEntityPool(entity.ID, pool)
+ cachedEntityPools := GetEntityPools("test-entity")
+ c.Require().Equal(1, len(cachedEntityPools))
+
+ entity.Credentials.Description = "test description"
+ SetEntity(entity)
+ cachedEntity, ok = GetEntity("test-entity")
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal(entity.Credentials.Description, cachedEntity.Credentials.Description)
+
+ // Make sure we don't clobber pools after updating the entity
+ cachedEntityPools = GetEntityPools("test-entity")
+ c.Require().Equal(1, len(cachedEntityPools))
+}
+
+func (c *CacheTestSuite) TestReplaceEntityPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: params.ForgeCredentials{
+ ID: 1,
+ ForgeType: params.GithubEndpointType,
+ },
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ }
+
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+ SetGithubCredentials(credentials)
+
+ SetEntity(entity)
+ ReplaceEntityPools(entity.ID, []params.Pool{pool1, pool2})
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal("test", cachedEntity.Credentials.Name)
+
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+}
+
+func (c *CacheTestSuite) TestReplaceEntityScaleSets() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet1 := params.ScaleSet{
+ ID: 1,
+ }
+ scaleSet2 := params.ScaleSet{
+ ID: 2,
+ }
+
+ SetEntity(entity)
+ ReplaceEntityScaleSets(entity.ID, []params.ScaleSet{scaleSet1, scaleSet2})
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 2)
+ c.Require().Contains(scaleSets, scaleSet1)
+ c.Require().Contains(scaleSets, scaleSet2)
+}
+
+func (c *CacheTestSuite) TestDeleteEntity() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ SetEntity(entity)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntity(entity.ID)
+ cachedEntity, ok = GetEntity(entity.ID)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeEntity{}, cachedEntity)
+}
+
+func (c *CacheTestSuite) TestSetEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+
+ SetEntity(entity)
+
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool)
+ c.Require().False(pools[0].Enabled)
+
+ pool.Enabled = true
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok = GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools = GetEntityPools(entity.ID)
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool)
+ c.Require().True(pools[0].Enabled)
+}
+
+func (c *CacheTestSuite) TestSetEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 1)
+ c.Require().Contains(scaleSets, scaleSet)
+ c.Require().False(scaleSets[0].Enabled)
+
+ scaleSet.Enabled = true
+ SetEntityScaleSet(entity.ID, scaleSet)
+ scaleSets = GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 1)
+ c.Require().Contains(scaleSets, scaleSet)
+ c.Require().True(scaleSets[0].Enabled)
+}
+
+func (c *CacheTestSuite) TestDeleteEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool := params.Pool{
+ ID: "pool-1",
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntityPool(entity.ID, pool.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 0)
+ c.Require().NotContains(pools, pool)
+}
+
+func (c *CacheTestSuite) TestDeleteEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+
+ DeleteEntityScaleSet(entity.ID, scaleSet.ID)
+ scaleSets := GetEntityScaleSets(entity.ID)
+ c.Require().Len(scaleSets, 0)
+ c.Require().NotContains(scaleSets, scaleSet)
+}
+
+func (c *CacheTestSuite) TestFindPoolsMatchingAllTags() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ },
+ }
+ pool3 := params.Pool{
+ ID: "pool-3",
+ Tags: []params.Tag{
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ SetEntityPool(entity.ID, pool3)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := FindPoolsMatchingAllTags(entity.ID, []string{"tag1", "tag2"})
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool1)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag1"})
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag3"})
+ c.Require().Len(pools, 1)
+ c.Require().Contains(pools, pool3)
+ pools = FindPoolsMatchingAllTags(entity.ID, []string{"tag4"})
+ c.Require().Len(pools, 0)
+}
+
+func (c *CacheTestSuite) TestGetEntityPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+ pool2 := params.Pool{
+ ID: "pool-2",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetEntityPools(entity.ID)
+ c.Require().Len(pools, 2)
+ c.Require().Contains(pools, pool1)
+ c.Require().Contains(pools, pool2)
+}
+
+func (c *CacheTestSuite) TestGetEntityScaleSet() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet := params.ScaleSet{
+ ID: 1,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets, ok := GetEntityScaleSet(entity.ID, scaleSet.ID)
+ c.Require().True(ok)
+ c.Require().Equal(scaleSet.ID, scaleSets.ID)
+}
+
+func (c *CacheTestSuite) TestGetEntityPool() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+
+ pool := params.Pool{
+ ID: "pool-1",
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ poolFromCache, ok := GetEntityPool(entity.ID, pool.ID)
+ c.Require().True(ok)
+ c.Require().Equal(pool.ID, poolFromCache.ID)
+}
+
+func (c *CacheTestSuite) TestSetGiteaCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ cachedCreds.Description = "new description"
+ SetGiteaCredentials(cachedCreds)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+ c.Require().Equal("new description", cachedCreds.Description)
+}
+
+func (c *CacheTestSuite) TestGetAllGiteaCredentials() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGiteaCredentials(credentials1)
+ SetGiteaCredentials(credentials2)
+
+ cachedCreds := GetAllGiteaCredentials()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1)
+ c.Require().Contains(cachedCreds, credentials2)
+}
+
+func (c *CacheTestSuite) TestDeleteGiteaCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGiteaCredentials(1)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().False(ok)
+ c.Require().Equal(params.ForgeCredentials{}, cachedCreds)
+}
+
+func (c *CacheTestSuite) TestDeleteGiteaCredentialsNotFound() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ }
+ SetGiteaCredentials(credentials)
+ cachedCreds, ok := GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+
+ DeleteGiteaCredentials(2)
+ cachedCreds, ok = GetGiteaCredentials(1)
+ c.Require().True(ok)
+ c.Require().Equal(credentials.ID, cachedCreds.ID)
+}
+
+func (c *CacheTestSuite) TestUpdateCredentialsInAffectedEntities() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ }
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+
+ cachedEntity1, ok := GetEntity(entity1.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity1.ID, cachedEntity1.ID)
+ cachedEntity2, ok := GetEntity(entity2.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity2.ID, cachedEntity2.ID)
+
+ c.Require().Equal(credentials.ID, cachedEntity1.Credentials.ID)
+ c.Require().Equal(credentials.ID, cachedEntity2.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity1.Credentials.Description)
+ c.Require().Equal(credentials.Description, cachedEntity2.Credentials.Description)
+
+ credentials.Description = "new description"
+ SetGiteaCredentials(credentials)
+
+ cachedEntity1, ok = GetEntity(entity1.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity1.ID, cachedEntity1.ID)
+ cachedEntity2, ok = GetEntity(entity2.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity2.ID, cachedEntity2.ID)
+
+ c.Require().Equal(credentials.ID, cachedEntity1.Credentials.ID)
+ c.Require().Equal(credentials.ID, cachedEntity2.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity1.Credentials.Description)
+ c.Require().Equal(credentials.Description, cachedEntity2.Credentials.Description)
+}
+
+func (c *CacheTestSuite) TestSetGiteaEntity() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ ForgeType: params.GiteaEndpointType,
+ }
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ SetGiteaCredentials(credentials)
+ SetEntity(entity)
+
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ c.Require().Equal(credentials.ID, cachedEntity.Credentials.ID)
+ c.Require().Equal(credentials.Description, cachedEntity.Credentials.Description)
+ c.Require().Equal(credentials.ForgeType, cachedEntity.Credentials.ForgeType)
+}
+
+func (c *CacheTestSuite) TestGetEntitiesUsingCredentials() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ Description: "test description2",
+ Name: "test",
+ ForgeType: params.GiteaEndpointType,
+ }
+
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ }
+ entity3 := params.ForgeEntity{
+ ID: "test-entity-3",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials2,
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+ SetEntity(entity3)
+
+ cachedEntities := GetEntitiesUsingCredentials(credentials)
+ c.Require().Len(cachedEntities, 2)
+ c.Require().Contains(cachedEntities, entity1)
+ c.Require().Contains(cachedEntities, entity2)
+
+ cachedEntities = GetEntitiesUsingCredentials(credentials2)
+ c.Require().Len(cachedEntities, 1)
+ c.Require().Contains(cachedEntities, entity3)
+}
+
+func (c *CacheTestSuite) TestGetAllEntities() {
+ credentials := params.ForgeCredentials{
+ ID: 1,
+ Description: "test description",
+ Name: "test",
+ ForgeType: params.GithubEndpointType,
+ }
+
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ Description: "test description2",
+ Name: "test",
+ ForgeType: params.GiteaEndpointType,
+ }
+
+ entity1 := params.ForgeEntity{
+ ID: "test-entity-1",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ CreatedAt: time.Now(),
+ }
+
+ entity2 := params.ForgeEntity{
+ ID: "test-entity-2",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials,
+ CreatedAt: time.Now().Add(1 * time.Second),
+ }
+
+ entity3 := params.ForgeEntity{
+ ID: "test-entity-3",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ Credentials: credentials2,
+ CreatedAt: time.Now().Add(2 * time.Second),
+ }
+
+ SetEntity(entity1)
+ SetEntity(entity2)
+ SetEntity(entity3)
+
+ // Sorted by creation date
+ cachedEntities := GetAllEntities()
+ c.Require().Len(cachedEntities, 3)
+ c.Require().Equal(cachedEntities[0], entity1)
+ c.Require().Equal(cachedEntities[1], entity2)
+ c.Require().Equal(cachedEntities[2], entity3)
+}
+
+func (c *CacheTestSuite) TestGetAllPools() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ pool1 := params.Pool{
+ ID: "pool-1",
+ CreatedAt: time.Now(),
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag2",
+ },
+ },
+ }
+
+ pool2 := params.Pool{
+ ID: "pool-2",
+ CreatedAt: time.Now().Add(1 * time.Second),
+ Tags: []params.Tag{
+ {
+ Name: "tag1",
+ },
+ {
+ Name: "tag3",
+ },
+ },
+ }
+
+ SetEntity(entity)
+ SetEntityPool(entity.ID, pool1)
+ SetEntityPool(entity.ID, pool2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ pools := GetAllPools()
+ c.Require().Len(pools, 2)
+ c.Require().Equal(pools[0].ID, pool1.ID)
+ c.Require().Equal(pools[1].ID, pool2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllScaleSets() {
+ entity := params.ForgeEntity{
+ ID: "test-entity",
+ EntityType: params.ForgeEntityTypeOrganization,
+ Name: "test",
+ Owner: "test",
+ }
+ scaleSet1 := params.ScaleSet{
+ ID: 1,
+ }
+ scaleSet2 := params.ScaleSet{
+ ID: 2,
+ }
+
+ SetEntity(entity)
+ SetEntityScaleSet(entity.ID, scaleSet1)
+ SetEntityScaleSet(entity.ID, scaleSet2)
+ cachedEntity, ok := GetEntity(entity.ID)
+ c.Require().True(ok)
+ c.Require().Equal(entity.ID, cachedEntity.ID)
+ scaleSets := GetAllScaleSets()
+ c.Require().Len(scaleSets, 2)
+ c.Require().Equal(scaleSets[0].ID, scaleSet1.ID)
+ c.Require().Equal(scaleSets[1].ID, scaleSet2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllGithubCredentialsAsMap() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ }
+ SetGithubCredentials(credentials1)
+ SetGithubCredentials(credentials2)
+
+ cachedCreds := GetAllGithubCredentialsAsMap()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1.ID)
+ c.Require().Contains(cachedCreds, credentials2.ID)
+}
+
+func (c *CacheTestSuite) TestGetAllGiteaCredentialsAsMap() {
+ credentials1 := params.ForgeCredentials{
+ ID: 1,
+ CreatedAt: time.Now(),
+ }
+ credentials2 := params.ForgeCredentials{
+ ID: 2,
+ CreatedAt: time.Now().Add(1 * time.Second),
+ }
+ SetGiteaCredentials(credentials1)
+ SetGiteaCredentials(credentials2)
+
+ cachedCreds := GetAllGiteaCredentialsAsMap()
+ c.Require().Len(cachedCreds, 2)
+ c.Require().Contains(cachedCreds, credentials1.ID)
+ c.Require().Contains(cachedCreds, credentials2.ID)
+}
+
+func TestCacheTestSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(CacheTestSuite))
+}
diff --git a/cache/credentials_cache.go b/cache/credentials_cache.go
new file mode 100644
index 00000000..3cb5c71d
--- /dev/null
+++ b/cache/credentials_cache.go
@@ -0,0 +1,148 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ credentialsCache *CredentialCache
+ giteaCredentialsCache *CredentialCache
+)
+
+func init() {
+ ghCredentialsCache := &CredentialCache{
+ cache: make(map[uint]params.ForgeCredentials),
+ }
+ gtCredentialsCache := &CredentialCache{
+ cache: make(map[uint]params.ForgeCredentials),
+ }
+
+ credentialsCache = ghCredentialsCache
+ giteaCredentialsCache = gtCredentialsCache
+}
+
+type CredentialCache struct {
+ mux sync.Mutex
+
+ cache map[uint]params.ForgeCredentials
+}
+
+func (g *CredentialCache) SetCredentialsRateLimit(credsID uint, rateLimit params.GithubRateLimit) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if creds, ok := g.cache[credsID]; ok {
+ creds.RateLimit = &rateLimit
+ g.cache[credsID] = creds
+ }
+}
+
+func (g *CredentialCache) SetCredentials(credentials params.ForgeCredentials) {
+ g.mux.Lock()
+ g.cache[credentials.ID] = credentials
+ g.mux.Unlock() // Release before calling into entityCache: holding g.mux here risks an AB-BA deadlock with EntityCache.GetEntity/GetAllEntities, which hold e.mux while calling GetGithubCredentials/GetGiteaCredentials (which take g.mux).
+
+ UpdateCredentialsInAffectedEntities(credentials)
+}
+
+func (g *CredentialCache) GetCredentials(id uint) (params.ForgeCredentials, bool) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if creds, ok := g.cache[id]; ok {
+ return creds, true
+ }
+ return params.ForgeCredentials{}, false
+}
+
+func (g *CredentialCache) DeleteCredentials(id uint) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ delete(g.cache, id)
+}
+
+func (g *CredentialCache) GetAllCredentials() []params.ForgeCredentials {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ creds := make([]params.ForgeCredentials, 0, len(g.cache))
+ for _, cred := range g.cache {
+ creds = append(creds, cred)
+ }
+
+ // Sort the credentials by ID
+ sortByID(creds)
+ return creds
+}
+
+func (g *CredentialCache) GetAllCredentialsAsMap() map[uint]params.ForgeCredentials {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ creds := make(map[uint]params.ForgeCredentials, len(g.cache))
+ for id, cred := range g.cache {
+ creds[id] = cred
+ }
+
+ return creds
+}
+
+func SetGithubCredentials(credentials params.ForgeCredentials) {
+ credentialsCache.SetCredentials(credentials)
+}
+
+func GetGithubCredentials(id uint) (params.ForgeCredentials, bool) {
+ return credentialsCache.GetCredentials(id)
+}
+
+func DeleteGithubCredentials(id uint) {
+ credentialsCache.DeleteCredentials(id)
+}
+
+func GetAllGithubCredentials() []params.ForgeCredentials {
+ return credentialsCache.GetAllCredentials()
+}
+
+func SetCredentialsRateLimit(credsID uint, rateLimit params.GithubRateLimit) {
+ credentialsCache.SetCredentialsRateLimit(credsID, rateLimit)
+}
+
+func GetAllGithubCredentialsAsMap() map[uint]params.ForgeCredentials {
+ return credentialsCache.GetAllCredentialsAsMap()
+}
+
+func SetGiteaCredentials(credentials params.ForgeCredentials) {
+ giteaCredentialsCache.SetCredentials(credentials)
+}
+
+func GetGiteaCredentials(id uint) (params.ForgeCredentials, bool) {
+ return giteaCredentialsCache.GetCredentials(id)
+}
+
+func DeleteGiteaCredentials(id uint) {
+ giteaCredentialsCache.DeleteCredentials(id)
+}
+
+func GetAllGiteaCredentials() []params.ForgeCredentials {
+ return giteaCredentialsCache.GetAllCredentials()
+}
+
+func GetAllGiteaCredentialsAsMap() map[uint]params.ForgeCredentials {
+ return giteaCredentialsCache.GetAllCredentialsAsMap()
+}
diff --git a/cache/entity_cache.go b/cache/entity_cache.go
new file mode 100644
index 00000000..c676332f
--- /dev/null
+++ b/cache/entity_cache.go
@@ -0,0 +1,435 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/params"
+)
+
+var entityCache *EntityCache
+
+func init() {
+ ghEntityCache := &EntityCache{
+ entities: make(map[string]EntityItem),
+ }
+ entityCache = ghEntityCache
+}
+
+type RunnerGroupEntry struct {
+ RunnerGroupID int64
+ time time.Time
+}
+
+type EntityItem struct {
+ Entity params.ForgeEntity
+ Pools map[string]params.Pool
+ ScaleSets map[uint]params.ScaleSet
+ RunnerGroups map[string]RunnerGroupEntry
+}
+
+type EntityCache struct {
+ mux sync.Mutex
+ // entity IDs are UUID4s. It is highly unlikely they will collide (🤞).
+ entities map[string]EntityItem
+}
+
+func (e *EntityCache) UpdateCredentialsInAffectedEntities(creds params.ForgeCredentials) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ for entityID, cache := range e.entities {
+ if cache.Entity.Credentials.GetID() == creds.GetID() {
+ cache.Entity.Credentials = creds
+ e.entities[entityID] = cache
+ }
+ }
+}
+
+func (e *EntityCache) GetEntity(entityID string) (params.ForgeEntity, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var creds params.ForgeCredentials
+ var ok bool
+ switch cache.Entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ creds, ok = GetGithubCredentials(cache.Entity.Credentials.ID)
+ case params.GiteaEndpointType:
+ creds, ok = GetGiteaCredentials(cache.Entity.Credentials.ID)
+ }
+ if ok {
+ cache.Entity.Credentials = creds
+ }
+ return cache.Entity, true
+ }
+ return params.ForgeEntity{}, false
+}
+
+func (e *EntityCache) SetEntity(entity params.ForgeEntity) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entity.ID]
+ if !ok {
+ e.entities[entity.ID] = EntityItem{
+ Entity: entity,
+ Pools: make(map[string]params.Pool),
+ ScaleSets: make(map[uint]params.ScaleSet),
+ RunnerGroups: make(map[string]RunnerGroupEntry),
+ }
+ return
+ }
+ cache.Entity = entity
+ e.entities[entity.ID] = cache
+}
+
+func (e *EntityCache) ReplaceEntityPools(entityID string, pools []params.Pool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entityID]
+ if !ok {
+ return
+ }
+
+ poolsByID := map[string]params.Pool{}
+ for _, pool := range pools {
+ poolsByID[pool.ID] = pool
+ }
+ cache.Pools = poolsByID
+ e.entities[entityID] = cache
+}
+
+func (e *EntityCache) ReplaceEntityScaleSets(entityID string, scaleSets []params.ScaleSet) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ cache, ok := e.entities[entityID]
+ if !ok {
+ return
+ }
+
+ scaleSetsByID := map[uint]params.ScaleSet{}
+ for _, scaleSet := range scaleSets {
+ scaleSetsByID[scaleSet.ID] = scaleSet
+ }
+ cache.ScaleSets = scaleSetsByID
+ e.entities[entityID] = cache
+}
+
+func (e *EntityCache) DeleteEntity(entityID string) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+ delete(e.entities, entityID)
+}
+
+func (e *EntityCache) SetEntityPool(entityID string, pool params.Pool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ cache.Pools[pool.ID] = pool
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) SetEntityScaleSet(entityID string, scaleSet params.ScaleSet) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ cache.ScaleSets[scaleSet.ID] = scaleSet
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) DeleteEntityPool(entityID string, poolID string) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ delete(cache.Pools, poolID)
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) DeleteEntityScaleSet(entityID string, scaleSetID uint) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ delete(cache.ScaleSets, scaleSetID)
+ e.entities[entityID] = cache
+ }
+}
+
+func (e *EntityCache) GetEntityPool(entityID string, poolID string) (params.Pool, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ if pool, ok := cache.Pools[poolID]; ok {
+ return pool, true
+ }
+ }
+ return params.Pool{}, false
+}
+
+func (e *EntityCache) GetEntityScaleSet(entityID string, scaleSetID uint) (params.ScaleSet, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ if scaleSet, ok := cache.ScaleSets[scaleSetID]; ok {
+ return scaleSet, true
+ }
+ }
+ return params.ScaleSet{}, false
+}
+
+func (e *EntityCache) FindPoolsMatchingAllTags(entityID string, tags []string) []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var pools []params.Pool
+ for _, pool := range cache.Pools {
+ if pool.HasRequiredLabels(tags) {
+ pools = append(pools, pool)
+ }
+ }
+ // Sort the pools by creation date.
+ sortByCreationDate(pools)
+ return pools
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntityPools(entityID string) []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var pools []params.Pool
+ for _, pool := range cache.Pools {
+ pools = append(pools, pool)
+ }
+ // Sort the pools by creation date.
+ sortByCreationDate(pools)
+ return pools
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntityScaleSets(entityID string) []params.ScaleSet {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if cache, ok := e.entities[entityID]; ok {
+ var scaleSets []params.ScaleSet
+ for _, scaleSet := range cache.ScaleSets {
+ scaleSets = append(scaleSets, scaleSet)
+ }
+ // Sort the scale sets by ID (sortByID), not by creation date.
+ sortByID(scaleSets)
+ return scaleSets
+ }
+ return nil
+}
+
+func (e *EntityCache) GetEntitiesUsingCredentials(creds params.ForgeCredentials) []params.ForgeEntity {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var entities []params.ForgeEntity
+ for _, cache := range e.entities {
+ if cache.Entity.Credentials.ForgeType != creds.ForgeType {
+ continue
+ }
+
+ if cache.Entity.Credentials.GetID() == creds.GetID() {
+ entities = append(entities, cache.Entity)
+ }
+ }
+ sortByCreationDate(entities)
+ return entities
+}
+
+func (e *EntityCache) GetAllEntities() []params.ForgeEntity {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var entities []params.ForgeEntity
+ for _, cache := range e.entities {
+ // Get the credentials from the credentials cache.
+ var creds params.ForgeCredentials
+ var ok bool
+ switch cache.Entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ creds, ok = GetGithubCredentials(cache.Entity.Credentials.ID)
+ case params.GiteaEndpointType:
+ creds, ok = GetGiteaCredentials(cache.Entity.Credentials.ID)
+ }
+ if ok {
+ cache.Entity.Credentials = creds
+ }
+ entities = append(entities, cache.Entity)
+ }
+ sortByCreationDate(entities)
+ return entities
+}
+
+func (e *EntityCache) GetAllPools() []params.Pool {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var pools []params.Pool
+ for _, cache := range e.entities {
+ for _, pool := range cache.Pools {
+ pools = append(pools, pool)
+ }
+ }
+ sortByCreationDate(pools)
+ return pools
+}
+
+func (e *EntityCache) GetAllScaleSets() []params.ScaleSet {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ var scaleSets []params.ScaleSet
+ for _, cache := range e.entities {
+ for _, scaleSet := range cache.ScaleSets {
+ scaleSets = append(scaleSets, scaleSet)
+ }
+ }
+ sortByID(scaleSets)
+ return scaleSets
+}
+
+func (e *EntityCache) SetEntityRunnerGroup(entityID, runnerGroupName string, runnerGroupID int64) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if _, ok := e.entities[entityID]; ok {
+ e.entities[entityID].RunnerGroups[runnerGroupName] = RunnerGroupEntry{
+ RunnerGroupID: runnerGroupID,
+ time: time.Now().UTC(),
+ }
+ }
+}
+
+func (e *EntityCache) GetEntityRunnerGroup(entityID, runnerGroupName string) (int64, bool) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ if _, ok := e.entities[entityID]; ok {
+ if runnerGroup, ok := e.entities[entityID].RunnerGroups[runnerGroupName]; ok {
+ if time.Now().UTC().After(runnerGroup.time.Add(1 * time.Hour)) {
+ delete(e.entities[entityID].RunnerGroups, runnerGroupName)
+ return 0, false
+ }
+ return runnerGroup.RunnerGroupID, true
+ }
+ }
+ return 0, false
+}
+
+func SetEntityRunnerGroup(entityID, runnerGroupName string, runnerGroupID int64) {
+ entityCache.SetEntityRunnerGroup(entityID, runnerGroupName, runnerGroupID)
+}
+
+func GetEntityRunnerGroup(entityID, runnerGroupName string) (int64, bool) {
+ return entityCache.GetEntityRunnerGroup(entityID, runnerGroupName)
+}
+
+func GetEntity(entityID string) (params.ForgeEntity, bool) {
+ return entityCache.GetEntity(entityID)
+}
+
+func SetEntity(entity params.ForgeEntity) {
+ entityCache.SetEntity(entity)
+}
+
+func ReplaceEntityPools(entityID string, pools []params.Pool) {
+ entityCache.ReplaceEntityPools(entityID, pools)
+}
+
+func ReplaceEntityScaleSets(entityID string, scaleSets []params.ScaleSet) {
+ entityCache.ReplaceEntityScaleSets(entityID, scaleSets)
+}
+
+func DeleteEntity(entityID string) {
+ entityCache.DeleteEntity(entityID)
+}
+
+func SetEntityPool(entityID string, pool params.Pool) {
+ entityCache.SetEntityPool(entityID, pool)
+}
+
+func SetEntityScaleSet(entityID string, scaleSet params.ScaleSet) {
+ entityCache.SetEntityScaleSet(entityID, scaleSet)
+}
+
+func DeleteEntityPool(entityID string, poolID string) {
+ entityCache.DeleteEntityPool(entityID, poolID)
+}
+
+func DeleteEntityScaleSet(entityID string, scaleSetID uint) {
+ entityCache.DeleteEntityScaleSet(entityID, scaleSetID)
+}
+
+func GetEntityPool(entityID string, poolID string) (params.Pool, bool) {
+ return entityCache.GetEntityPool(entityID, poolID)
+}
+
+func GetEntityScaleSet(entityID string, scaleSetID uint) (params.ScaleSet, bool) {
+ return entityCache.GetEntityScaleSet(entityID, scaleSetID)
+}
+
+func FindPoolsMatchingAllTags(entityID string, tags []string) []params.Pool {
+ return entityCache.FindPoolsMatchingAllTags(entityID, tags)
+}
+
+func GetEntityPools(entityID string) []params.Pool {
+ return entityCache.GetEntityPools(entityID)
+}
+
+func GetEntityScaleSets(entityID string) []params.ScaleSet {
+ return entityCache.GetEntityScaleSets(entityID)
+}
+
+func UpdateCredentialsInAffectedEntities(creds params.ForgeCredentials) {
+ entityCache.UpdateCredentialsInAffectedEntities(creds)
+}
+
+func GetEntitiesUsingCredentials(creds params.ForgeCredentials) []params.ForgeEntity {
+ return entityCache.GetEntitiesUsingCredentials(creds)
+}
+
+func GetAllEntities() []params.ForgeEntity {
+ return entityCache.GetAllEntities()
+}
+
+func GetAllPools() []params.Pool {
+ return entityCache.GetAllPools()
+}
+
+func GetAllScaleSets() []params.ScaleSet {
+ return entityCache.GetAllScaleSets()
+}
diff --git a/cache/github_client.go b/cache/github_client.go
new file mode 100644
index 00000000..179a9718
--- /dev/null
+++ b/cache/github_client.go
@@ -0,0 +1,60 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/runner/common"
+)
+
+// ghClientCache is the package-level singleton; initialized in init().
+var ghClientCache *GithubClientCache
+
+// GithubClientCache caches one forge client per entity ID. All access goes
+// through the mutex-guarded methods below.
+type GithubClientCache struct {
+ mux sync.Mutex
+
+ // cache maps an entity ID to its client.
+ cache map[string]common.GithubClient
+}
+
+func init() {
+ clientCache := &GithubClientCache{
+ cache: make(map[string]common.GithubClient),
+ }
+ ghClientCache = clientCache
+}
+
+// SetClient stores (or replaces) the client cached for entityID.
+func (g *GithubClientCache) SetClient(entityID string, client common.GithubClient) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ g.cache[entityID] = client
+}
+
+// GetClient returns the client cached for entityID and whether one exists.
+// On a miss the returned client is nil.
+func (g *GithubClientCache) GetClient(entityID string) (common.GithubClient, bool) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ client, ok := g.cache[entityID]
+ return client, ok
+}
+
+// SetGithubClient caches the client for the given entity ID.
+func SetGithubClient(entityID string, client common.GithubClient) {
+ ghClientCache.SetClient(entityID, client)
+}
+
+// GetGithubClient returns the cached client for the entity ID, if any.
+func GetGithubClient(entityID string) (common.GithubClient, bool) {
+ return ghClientCache.GetClient(entityID)
+}
diff --git a/cache/instance_cache.go b/cache/instance_cache.go
new file mode 100644
index 00000000..ae2c1cec
--- /dev/null
+++ b/cache/instance_cache.go
@@ -0,0 +1,143 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sync"
+
+ "github.com/cloudbase/garm/params"
+)
+
+// instanceCache is the package-level singleton; initialized in init().
+var instanceCache *InstanceCache
+
+func init() {
+ cache := &InstanceCache{
+ cache: make(map[string]params.Instance),
+ }
+ instanceCache = cache
+}
+
+// InstanceCache caches runner instances keyed by instance name. All access
+// goes through the mutex-guarded methods below.
+type InstanceCache struct {
+ mux sync.Mutex
+
+ // cache maps an instance name to the instance.
+ cache map[string]params.Instance
+}
+
+// SetInstance stores the instance keyed by its Name, replacing any existing
+// entry with the same name.
+func (i *InstanceCache) SetInstance(instance params.Instance) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ i.cache[instance.Name] = instance
+}
+
+// GetInstance returns the cached instance by name and whether it was found.
+// On a miss the zero-value params.Instance is returned.
+func (i *InstanceCache) GetInstance(name string) (params.Instance, bool) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ instance, ok := i.cache[name]
+ return instance, ok
+}
+
+// DeleteInstance removes the named instance from the cache; deleting a
+// missing name is a no-op.
+func (i *InstanceCache) DeleteInstance(name string) {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ delete(i.cache, name)
+}
+
+// GetAllInstances returns a snapshot of every cached instance, sorted by
+// creation date.
+func (i *InstanceCache) GetAllInstances() []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ instances := make([]params.Instance, 0, len(i.cache))
+ for _, instance := range i.cache {
+ instances = append(instances, instance)
+ }
+ sortByCreationDate(instances)
+ return instances
+}
+
+// GetInstancesForPool returns the cached instances whose PoolID matches,
+// sorted by creation date. Returns nil when none match.
+func (i *InstanceCache) GetInstancesForPool(poolID string) []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ var filteredInstances []params.Instance
+ for _, instance := range i.cache {
+ if instance.PoolID == poolID {
+ filteredInstances = append(filteredInstances, instance)
+ }
+ }
+ sortByCreationDate(filteredInstances)
+ return filteredInstances
+}
+
+// GetInstancesForScaleSet returns the cached instances whose ScaleSetID
+// matches, sorted by creation date. Returns nil when none match.
+func (i *InstanceCache) GetInstancesForScaleSet(scaleSetID uint) []params.Instance {
+ i.mux.Lock()
+ defer i.mux.Unlock()
+
+ var filteredInstances []params.Instance
+ for _, instance := range i.cache {
+ if instance.ScaleSetID == scaleSetID {
+ filteredInstances = append(filteredInstances, instance)
+ }
+ }
+ sortByCreationDate(filteredInstances)
+ return filteredInstances
+}
+
+// GetEntityInstances returns all cached instances whose PoolID belongs to one
+// of the entity's cached pools, in creation-date order. Note that matching is
+// on PoolID only — instances without a pool association are not returned.
+func (i *InstanceCache) GetEntityInstances(entityID string) []params.Instance {
+ poolIDs := map[string]bool{}
+ for _, pool := range GetEntityPools(entityID) {
+ poolIDs[pool.ID] = true
+ }
+
+ instances := []params.Instance{}
+ for _, instance := range i.GetAllInstances() {
+ if poolIDs[instance.PoolID] {
+ instances = append(instances, instance)
+ }
+ }
+ return instances
+}
+
+// SetInstanceCache stores the instance in the package-level cache.
+func SetInstanceCache(instance params.Instance) {
+ instanceCache.SetInstance(instance)
+}
+
+// GetInstanceCache returns the named instance and whether it was found.
+func GetInstanceCache(name string) (params.Instance, bool) {
+ return instanceCache.GetInstance(name)
+}
+
+// DeleteInstanceCache removes the named instance from the cache.
+func DeleteInstanceCache(name string) {
+ instanceCache.DeleteInstance(name)
+}
+
+// GetAllInstancesCache returns all cached instances, sorted by creation date.
+func GetAllInstancesCache() []params.Instance {
+ return instanceCache.GetAllInstances()
+}
+
+// GetInstancesForPool returns all cached instances belonging to the pool.
+func GetInstancesForPool(poolID string) []params.Instance {
+ return instanceCache.GetInstancesForPool(poolID)
+}
+
+// GetInstancesForScaleSet returns all cached instances belonging to the
+// scale set.
+func GetInstancesForScaleSet(scaleSetID uint) []params.Instance {
+ return instanceCache.GetInstancesForScaleSet(scaleSetID)
+}
+
+// GetEntityInstances returns all cached instances belonging to any of the
+// entity's pools.
+func GetEntityInstances(entityID string) []params.Instance {
+ return instanceCache.GetEntityInstances(entityID)
+}
diff --git a/cache/tools_cache.go b/cache/tools_cache.go
new file mode 100644
index 00000000..30e83a0e
--- /dev/null
+++ b/cache/tools_cache.go
@@ -0,0 +1,116 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+// githubToolsCache is the package-level singleton; initialized in init().
+var githubToolsCache *GithubToolsCache
+
+func init() {
+ ghToolsCache := &GithubToolsCache{
+ entities: make(map[string]GithubEntityTools),
+ }
+ githubToolsCache = ghToolsCache
+}
+
+// GithubEntityTools is one cache entry: the runner tools (or the error from
+// the last fetch attempt) for a single forge entity.
+type GithubEntityTools struct {
+ // updatedAt records when the entry was last written.
+ updatedAt time.Time
+ // expiresAt is only set for GitHub entities; see Set().
+ expiresAt time.Time
+ // err, when non-nil, is returned by Get() instead of tools.
+ err error
+ entity params.ForgeEntity
+ tools []commonParams.RunnerApplicationDownload
+}
+
+// GithubToolsCache caches runner tools per entity, guarded by a mutex.
+type GithubToolsCache struct {
+ mux sync.Mutex
+ // entity IDs are UUID4s. It is highly unlikely they will collide (🤞).
+ entities map[string]GithubEntityTools
+}
+
+// Get returns the cached tools for entityID. GitHub entries are treated as
+// stale 5 minutes before their recorded expiry and are evicted on read, in
+// which case an error is returned; entries for other forge types are never
+// expired here (presumably their tools are static — confirm against the
+// refresh path). If the entry recorded a fetch error, that error is returned.
+func (g *GithubToolsCache) Get(entityID string) ([]commonParams.RunnerApplicationDownload, error) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ if cache, ok := g.entities[entityID]; ok {
+ if cache.entity.Credentials.ForgeType == params.GithubEndpointType {
+ // Evict 5 minutes early so callers refresh before actual expiry.
+ if time.Now().UTC().After(cache.expiresAt.Add(-5 * time.Minute)) {
+ // Stale cache, remove it.
+ delete(g.entities, entityID)
+ return nil, fmt.Errorf("cache expired for entity %s", entityID)
+ }
+ }
+ if cache.err != nil {
+ return nil, cache.err
+ }
+ return cache.tools, nil
+ }
+ return nil, fmt.Errorf("no cache found for entity %s", entityID)
+}
+
+// Set caches the tools for the given entity, replacing any previous entry
+// (including one that recorded an error). Only GitHub entries get an expiry
+// (1 hour); other forge types are cached without one and never expire in
+// Get().
+func (g *GithubToolsCache) Set(entity params.ForgeEntity, tools []commonParams.RunnerApplicationDownload) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ // Record timestamps in UTC: Get() compares expiresAt against
+ // time.Now().UTC(), so keep both sides in the same location for
+ // consistency (and for anyone inspecting/serializing the entry).
+ now := time.Now().UTC()
+ forgeTools := GithubEntityTools{
+ updatedAt: now,
+ entity: entity,
+ tools: tools,
+ err: nil,
+ }
+
+ if entity.Credentials.ForgeType == params.GithubEndpointType {
+ forgeTools.expiresAt = now.Add(1 * time.Hour)
+ }
+
+ g.entities[entity.ID] = forgeTools
+}
+
+// SetToolsError records a tools-fetch error for the entity. Get() will return
+// this error until Set() replaces the entry. An existing entry keeps its
+// tools/expiry but has its error and timestamp refreshed.
+func (g *GithubToolsCache) SetToolsError(entity params.ForgeEntity, err error) {
+ g.mux.Lock()
+ defer g.mux.Unlock()
+
+ // If the entity is not in the cache, add it with the error.
+ cache, ok := g.entities[entity.ID]
+ if !ok {
+ g.entities[entity.ID] = GithubEntityTools{
+ updatedAt: time.Now().UTC(),
+ entity: entity,
+ err: err,
+ }
+ return
+ }
+
+ // Update the error for the existing entity. Also refresh updatedAt so the
+ // timestamp reflects when this error was actually observed, not when the
+ // tools were last fetched.
+ cache.err = err
+ cache.updatedAt = time.Now().UTC()
+ g.entities[entity.ID] = cache
+}
+
+// SetGithubToolsCache caches the tools for the given entity.
+func SetGithubToolsCache(entity params.ForgeEntity, tools []commonParams.RunnerApplicationDownload) {
+ githubToolsCache.Set(entity, tools)
+}
+
+// GetGithubToolsCache returns the cached tools for the entity ID, or an error
+// if the entry is missing, expired, or recorded a fetch error.
+func GetGithubToolsCache(entityID string) ([]commonParams.RunnerApplicationDownload, error) {
+ return githubToolsCache.Get(entityID)
+}
+
+// SetGithubToolsCacheError records a tools-fetch error for the entity.
+func SetGithubToolsCacheError(entity params.ForgeEntity, err error) {
+ githubToolsCache.SetToolsError(entity, err)
+}
diff --git a/cache/util.go b/cache/util.go
new file mode 100644
index 00000000..5fd234a9
--- /dev/null
+++ b/cache/util.go
@@ -0,0 +1,32 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cache
+
+import (
+ "sort"
+
+ "github.com/cloudbase/garm/params"
+)
+
+// sortByID sorts the slice in place, ascending by GetID(). Note that
+// sort.Slice is not stable: elements with equal IDs may end up in any order.
+func sortByID[T params.IDGetter](s []T) {
+ sort.Slice(s, func(i, j int) bool {
+ return s[i].GetID() < s[j].GetID()
+ })
+}
+
+// sortByCreationDate sorts the slice in place, oldest first by GetCreatedAt().
+// Note that sort.Slice is not stable: elements with equal timestamps may end
+// up in any order.
+func sortByCreationDate[T params.CreationDateGetter](s []T) {
+ sort.Slice(s, func(i, j int) bool {
+ return s[i].GetCreatedAt().Before(s[j].GetCreatedAt())
+ })
+}
diff --git a/client/controller/controller_client.go b/client/controller/controller_client.go
new file mode 100644
index 00000000..cf6cde1a
--- /dev/null
+++ b/client/controller/controller_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new controller API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// New creates a new controller API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// New creates a new controller API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for controller API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ UpdateController(params *UpdateControllerParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateControllerOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+UpdateController updates controller
+*/
+func (a *Client) UpdateController(params *UpdateControllerParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateControllerOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateControllerParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateController",
+ Method: "PUT",
+ PathPattern: "/controller",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateControllerReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateControllerOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateController: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/controller/update_controller_parameters.go b/client/controller/update_controller_parameters.go
new file mode 100644
index 00000000..a0705d60
--- /dev/null
+++ b/client/controller/update_controller_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateControllerParams creates a new UpdateControllerParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateControllerParams() *UpdateControllerParams {
+ return &UpdateControllerParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateControllerParamsWithTimeout creates a new UpdateControllerParams object
+// with the ability to set a timeout on a request.
+func NewUpdateControllerParamsWithTimeout(timeout time.Duration) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateControllerParamsWithContext creates a new UpdateControllerParams object
+// with the ability to set a context for a request.
+func NewUpdateControllerParamsWithContext(ctx context.Context) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateControllerParamsWithHTTPClient creates a new UpdateControllerParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateControllerParamsWithHTTPClient(client *http.Client) *UpdateControllerParams {
+ return &UpdateControllerParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateControllerParams contains all the parameters to send to the API endpoint
+
+ for the update controller operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateControllerParams struct {
+
+ /* Body.
+
+ Parameters used when updating the controller.
+ */
+ Body garm_params.UpdateControllerParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update controller params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateControllerParams) WithDefaults() *UpdateControllerParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update controller params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateControllerParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update controller params
+func (o *UpdateControllerParams) WithTimeout(timeout time.Duration) *UpdateControllerParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update controller params
+func (o *UpdateControllerParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update controller params
+func (o *UpdateControllerParams) WithContext(ctx context.Context) *UpdateControllerParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update controller params
+func (o *UpdateControllerParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update controller params
+func (o *UpdateControllerParams) WithHTTPClient(client *http.Client) *UpdateControllerParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update controller params
+func (o *UpdateControllerParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update controller params
+func (o *UpdateControllerParams) WithBody(body garm_params.UpdateControllerParams) *UpdateControllerParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update controller params
+func (o *UpdateControllerParams) SetBody(body garm_params.UpdateControllerParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateControllerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/controller/update_controller_responses.go b/client/controller/update_controller_responses.go
new file mode 100644
index 00000000..f555a78e
--- /dev/null
+++ b/client/controller/update_controller_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateControllerReader is a Reader for the UpdateController structure.
+type UpdateControllerReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateControllerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateControllerOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateControllerBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /controller] UpdateController", response, response.Code())
+ }
+}
+
+// NewUpdateControllerOK creates a UpdateControllerOK with default headers values
+func NewUpdateControllerOK() *UpdateControllerOK {
+ return &UpdateControllerOK{}
+}
+
+/*
+UpdateControllerOK describes a response with status code 200, with default header values.
+
+ControllerInfo
+*/
+type UpdateControllerOK struct {
+ Payload garm_params.ControllerInfo
+}
+
+// IsSuccess returns true when this update controller o k response has a 2xx status code
+func (o *UpdateControllerOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update controller o k response has a 3xx status code
+func (o *UpdateControllerOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update controller o k response has a 4xx status code
+func (o *UpdateControllerOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update controller o k response has a 5xx status code
+func (o *UpdateControllerOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update controller o k response a status code equal to that given
+func (o *UpdateControllerOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update controller o k response
+func (o *UpdateControllerOK) Code() int {
+ return 200
+}
+
+func (o *UpdateControllerOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerOK %s", 200, payload)
+}
+
+func (o *UpdateControllerOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerOK %s", 200, payload)
+}
+
+func (o *UpdateControllerOK) GetPayload() garm_params.ControllerInfo {
+ return o.Payload
+}
+
+func (o *UpdateControllerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateControllerBadRequest creates a UpdateControllerBadRequest with default headers values
+func NewUpdateControllerBadRequest() *UpdateControllerBadRequest {
+ return &UpdateControllerBadRequest{}
+}
+
+/*
+UpdateControllerBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateControllerBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update controller bad request response has a 2xx status code
+func (o *UpdateControllerBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update controller bad request response has a 3xx status code
+func (o *UpdateControllerBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update controller bad request response has a 4xx status code
+func (o *UpdateControllerBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update controller bad request response has a 5xx status code
+func (o *UpdateControllerBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update controller bad request response a status code equal to that given
+func (o *UpdateControllerBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update controller bad request response
+func (o *UpdateControllerBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateControllerBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerBadRequest %s", 400, payload)
+}
+
+func (o *UpdateControllerBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /controller][%d] updateControllerBadRequest %s", 400, payload)
+}
+
+func (o *UpdateControllerBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateControllerBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/controller_info/controller_info_client.go b/client/controller_info/controller_info_client.go
new file mode 100644
index 00000000..bccd4e06
--- /dev/null
+++ b/client/controller_info/controller_info_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new controller info API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// New creates a new controller info API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// New creates a new controller info API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for controller info API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ ControllerInfo(params *ControllerInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ControllerInfoOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ControllerInfo gets controller info
+*/
+func (a *Client) ControllerInfo(params *ControllerInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ControllerInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewControllerInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ControllerInfo",
+ Method: "GET",
+ PathPattern: "/controller-info",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ControllerInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ControllerInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ControllerInfo: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/controller_info/controller_info_parameters.go b/client/controller_info/controller_info_parameters.go
new file mode 100644
index 00000000..f4d33ef6
--- /dev/null
+++ b/client/controller_info/controller_info_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewControllerInfoParams creates a new ControllerInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewControllerInfoParams() *ControllerInfoParams {
+ return &ControllerInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewControllerInfoParamsWithTimeout creates a new ControllerInfoParams object
+// with the ability to set a timeout on a request.
+func NewControllerInfoParamsWithTimeout(timeout time.Duration) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewControllerInfoParamsWithContext creates a new ControllerInfoParams object
+// with the ability to set a context for a request.
+func NewControllerInfoParamsWithContext(ctx context.Context) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewControllerInfoParamsWithHTTPClient creates a new ControllerInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewControllerInfoParamsWithHTTPClient(client *http.Client) *ControllerInfoParams {
+ return &ControllerInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ControllerInfoParams contains all the parameters to send to the API endpoint
+
+ for the controller info operation.
+
+ Typically these are written to a http.Request.
+*/
+type ControllerInfoParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the controller info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ControllerInfoParams) WithDefaults() *ControllerInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the controller info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ControllerInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the controller info params
+func (o *ControllerInfoParams) WithTimeout(timeout time.Duration) *ControllerInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the controller info params
+func (o *ControllerInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the controller info params
+func (o *ControllerInfoParams) WithContext(ctx context.Context) *ControllerInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the controller info params
+func (o *ControllerInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the controller info params
+func (o *ControllerInfoParams) WithHTTPClient(client *http.Client) *ControllerInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the controller info params
+func (o *ControllerInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ControllerInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/controller_info/controller_info_responses.go b/client/controller_info/controller_info_responses.go
new file mode 100644
index 00000000..06ec1b7f
--- /dev/null
+++ b/client/controller_info/controller_info_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package controller_info
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ControllerInfoReader is a Reader for the ControllerInfo structure.
+type ControllerInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ControllerInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewControllerInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 409:
+ result := NewControllerInfoConflict()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /controller-info] ControllerInfo", response, response.Code())
+ }
+}
+
+// NewControllerInfoOK creates a ControllerInfoOK with default headers values
+func NewControllerInfoOK() *ControllerInfoOK {
+ return &ControllerInfoOK{}
+}
+
+/*
+ControllerInfoOK describes a response with status code 200, with default header values.
+
+ControllerInfo
+*/
+type ControllerInfoOK struct {
+ Payload garm_params.ControllerInfo
+}
+
+// IsSuccess returns true when this controller info o k response has a 2xx status code
+func (o *ControllerInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this controller info o k response has a 3xx status code
+func (o *ControllerInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this controller info o k response has a 4xx status code
+func (o *ControllerInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this controller info o k response has a 5xx status code
+func (o *ControllerInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this controller info o k response has a status code equal to that given
+func (o *ControllerInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the controller info o k response
+func (o *ControllerInfoOK) Code() int {
+ return 200
+}
+
+func (o *ControllerInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoOK %s", 200, payload)
+}
+
+func (o *ControllerInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoOK %s", 200, payload)
+}
+
+func (o *ControllerInfoOK) GetPayload() garm_params.ControllerInfo {
+ return o.Payload
+}
+
+func (o *ControllerInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewControllerInfoConflict creates a ControllerInfoConflict with default headers values
+func NewControllerInfoConflict() *ControllerInfoConflict {
+ return &ControllerInfoConflict{}
+}
+
+/*
+ControllerInfoConflict describes a response with status code 409, with default header values.
+
+APIErrorResponse
+*/
+type ControllerInfoConflict struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this controller info conflict response has a 2xx status code
+func (o *ControllerInfoConflict) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this controller info conflict response has a 3xx status code
+func (o *ControllerInfoConflict) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this controller info conflict response has a 4xx status code
+func (o *ControllerInfoConflict) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this controller info conflict response has a 5xx status code
+func (o *ControllerInfoConflict) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this controller info conflict response has a status code equal to that given
+func (o *ControllerInfoConflict) IsCode(code int) bool {
+ return code == 409
+}
+
+// Code gets the status code for the controller info conflict response
+func (o *ControllerInfoConflict) Code() int {
+ return 409
+}
+
+func (o *ControllerInfoConflict) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoConflict %s", 409, payload)
+}
+
+func (o *ControllerInfoConflict) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /controller-info][%d] controllerInfoConflict %s", 409, payload)
+}
+
+func (o *ControllerInfoConflict) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ControllerInfoConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/create_credentials_parameters.go b/client/credentials/create_credentials_parameters.go
new file mode 100644
index 00000000..4288808f
--- /dev/null
+++ b/client/credentials/create_credentials_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateCredentialsParams creates a new CreateCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateCredentialsParams() *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateCredentialsParamsWithTimeout creates a new CreateCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewCreateCredentialsParamsWithTimeout(timeout time.Duration) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateCredentialsParamsWithContext creates a new CreateCredentialsParams object
+// with the ability to set a context for a request.
+func NewCreateCredentialsParamsWithContext(ctx context.Context) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateCredentialsParamsWithHTTPClient creates a new CreateCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateCredentialsParamsWithHTTPClient(client *http.Client) *CreateCredentialsParams {
+ return &CreateCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the create credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when creating a GitHub credential.
+ */
+ Body garm_params.CreateGithubCredentialsParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateCredentialsParams) WithDefaults() *CreateCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create credentials params
+func (o *CreateCredentialsParams) WithTimeout(timeout time.Duration) *CreateCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create credentials params
+func (o *CreateCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create credentials params
+func (o *CreateCredentialsParams) WithContext(ctx context.Context) *CreateCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create credentials params
+func (o *CreateCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create credentials params
+func (o *CreateCredentialsParams) WithHTTPClient(client *http.Client) *CreateCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create credentials params
+func (o *CreateCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create credentials params
+func (o *CreateCredentialsParams) WithBody(body garm_params.CreateGithubCredentialsParams) *CreateCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create credentials params
+func (o *CreateCredentialsParams) SetBody(body garm_params.CreateGithubCredentialsParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/create_credentials_responses.go b/client/credentials/create_credentials_responses.go
new file mode 100644
index 00000000..a0037edf
--- /dev/null
+++ b/client/credentials/create_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateCredentialsReader is a Reader for the CreateCredentials structure.
+type CreateCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewCreateCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /github/credentials] CreateCredentials", response, response.Code())
+ }
+}
+
+// NewCreateCredentialsOK creates a CreateCredentialsOK with default headers values
+func NewCreateCredentialsOK() *CreateCredentialsOK {
+ return &CreateCredentialsOK{}
+}
+
+/*
+CreateCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type CreateCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this create credentials o k response has a 2xx status code
+func (o *CreateCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create credentials o k response has a 3xx status code
+func (o *CreateCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create credentials o k response has a 4xx status code
+func (o *CreateCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create credentials o k response has a 5xx status code
+func (o *CreateCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create credentials o k response has a status code equal to that given
+func (o *CreateCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create credentials o k response
+func (o *CreateCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *CreateCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *CreateCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateCredentialsBadRequest creates a CreateCredentialsBadRequest with default headers values
+func NewCreateCredentialsBadRequest() *CreateCredentialsBadRequest {
+ return &CreateCredentialsBadRequest{}
+}
+
+/*
+CreateCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type CreateCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create credentials bad request response has a 2xx status code
+func (o *CreateCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create credentials bad request response has a 3xx status code
+func (o *CreateCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create credentials bad request response has a 4xx status code
+func (o *CreateCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create credentials bad request response has a 5xx status code
+func (o *CreateCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create credentials bad request response has a status code equal to that given
+func (o *CreateCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the create credentials bad request response
+func (o *CreateCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *CreateCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/credentials][%d] createCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/create_gitea_credentials_parameters.go b/client/credentials/create_gitea_credentials_parameters.go
new file mode 100644
index 00000000..6e255bfa
--- /dev/null
+++ b/client/credentials/create_gitea_credentials_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGiteaCredentialsParams creates a new CreateGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGiteaCredentialsParams() *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithTimeout creates a new CreateGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewCreateGiteaCredentialsParamsWithTimeout(timeout time.Duration) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithContext creates a new CreateGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewCreateGiteaCredentialsParamsWithContext(ctx context.Context) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGiteaCredentialsParamsWithHTTPClient creates a new CreateGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGiteaCredentialsParamsWithHTTPClient(client *http.Client) *CreateGiteaCredentialsParams {
+ return &CreateGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the create gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGiteaCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when creating a Gitea credential.
+ */
+ Body garm_params.CreateGiteaCredentialsParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaCredentialsParams) WithDefaults() *CreateGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithTimeout(timeout time.Duration) *CreateGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithContext(ctx context.Context) *CreateGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithHTTPClient(client *http.Client) *CreateGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) WithBody(body garm_params.CreateGiteaCredentialsParams) *CreateGiteaCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create gitea credentials params
+func (o *CreateGiteaCredentialsParams) SetBody(body garm_params.CreateGiteaCredentialsParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/create_gitea_credentials_responses.go b/client/credentials/create_gitea_credentials_responses.go
new file mode 100644
index 00000000..2389cb04
--- /dev/null
+++ b/client/credentials/create_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGiteaCredentialsReader is a Reader for the CreateGiteaCredentials structure.
+type CreateGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewCreateGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /gitea/credentials] CreateGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewCreateGiteaCredentialsOK creates a CreateGiteaCredentialsOK with default headers values
+func NewCreateGiteaCredentialsOK() *CreateGiteaCredentialsOK {
+ return &CreateGiteaCredentialsOK{}
+}
+
+/*
+CreateGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type CreateGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this create gitea credentials o k response has a 2xx status code
+func (o *CreateGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create gitea credentials o k response has a 3xx status code
+func (o *CreateGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea credentials o k response has a 4xx status code
+func (o *CreateGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create gitea credentials o k response has a 5xx status code
+func (o *CreateGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea credentials o k response has a status code equal to that given
+func (o *CreateGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create gitea credentials o k response
+func (o *CreateGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *CreateGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *CreateGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *CreateGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGiteaCredentialsBadRequest creates a CreateGiteaCredentialsBadRequest with default headers values
+func NewCreateGiteaCredentialsBadRequest() *CreateGiteaCredentialsBadRequest {
+ return &CreateGiteaCredentialsBadRequest{}
+}
+
+/*
+CreateGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type CreateGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create gitea credentials bad request response has a 2xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this create gitea credentials bad request response has a 3xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea credentials bad request response has a 4xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this create gitea credentials bad request response has a 5xx status code
+func (o *CreateGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea credentials bad request response has a status code equal to that given
+func (o *CreateGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the create gitea credentials bad request response
+func (o *CreateGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *CreateGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/credentials][%d] createGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *CreateGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/credentials_client.go b/client/credentials/credentials_client.go
new file mode 100644
index 00000000..3dfe1abd
--- /dev/null
+++ b/client/credentials/credentials_client.go
@@ -0,0 +1,461 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new credentials API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// New creates a new credentials API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// New creates a new credentials API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for credentials API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateCredentials(params *CreateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateCredentialsOK, error)
+
+ CreateGiteaCredentials(params *CreateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaCredentialsOK, error)
+
+ DeleteCredentials(params *DeleteCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteGiteaCredentials(params *DeleteGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetCredentials(params *GetCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCredentialsOK, error)
+
+ GetGiteaCredentials(params *GetGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaCredentialsOK, error)
+
+ ListCredentials(params *ListCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListCredentialsOK, error)
+
+ ListGiteaCredentials(params *ListGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaCredentialsOK, error)
+
+ UpdateCredentials(params *UpdateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateCredentialsOK, error)
+
+ UpdateGiteaCredentials(params *UpdateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaCredentialsOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateCredentials creates a git hub credential
+*/
+func (a *Client) CreateCredentials(params *CreateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateCredentials",
+ Method: "POST",
+ PathPattern: "/github/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for CreateCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+CreateGiteaCredentials creates a gitea credential
+*/
+func (a *Client) CreateGiteaCredentials(params *CreateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGiteaCredentials",
+ Method: "POST",
+ PathPattern: "/gitea/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for CreateGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+DeleteCredentials deletes a git hub credential
+*/
+func (a *Client) DeleteCredentials(params *DeleteCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteCredentials",
+ Method: "DELETE",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteGiteaCredentials deletes a gitea credential
+*/
+func (a *Client) DeleteGiteaCredentials(params *DeleteGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGiteaCredentials",
+ Method: "DELETE",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetCredentials gets a git hub credential
+*/
+func (a *Client) GetCredentials(params *GetCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetCredentials",
+ Method: "GET",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetGiteaCredentials gets a gitea credential
+*/
+func (a *Client) GetGiteaCredentials(params *GetGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGiteaCredentials",
+ Method: "GET",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ListCredentials lists all credentials
+*/
+func (a *Client) ListCredentials(params *ListCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListCredentials",
+ Method: "GET",
+ PathPattern: "/github/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ListCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ListGiteaCredentials lists all credentials
+*/
+func (a *Client) ListGiteaCredentials(params *ListGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGiteaCredentials",
+ Method: "GET",
+ PathPattern: "/gitea/credentials",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ListGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+UpdateCredentials updates a git hub credential
+*/
+func (a *Client) UpdateCredentials(params *UpdateCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateCredentials",
+ Method: "PUT",
+ PathPattern: "/github/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+UpdateGiteaCredentials updates a gitea credential
+*/
+func (a *Client) UpdateGiteaCredentials(params *UpdateGiteaCredentialsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaCredentialsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGiteaCredentialsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGiteaCredentials",
+ Method: "PUT",
+ PathPattern: "/gitea/credentials/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGiteaCredentialsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGiteaCredentialsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for UpdateGiteaCredentials: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/credentials/delete_credentials_parameters.go b/client/credentials/delete_credentials_parameters.go
new file mode 100644
index 00000000..f36f8725
--- /dev/null
+++ b/client/credentials/delete_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteCredentialsParams creates a new DeleteCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteCredentialsParams() *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteCredentialsParamsWithTimeout creates a new DeleteCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewDeleteCredentialsParamsWithTimeout(timeout time.Duration) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteCredentialsParamsWithContext creates a new DeleteCredentialsParams object
+// with the ability to set a context for a request.
+func NewDeleteCredentialsParamsWithContext(ctx context.Context) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteCredentialsParamsWithHTTPClient creates a new DeleteCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteCredentialsParamsWithHTTPClient(client *http.Client) *DeleteCredentialsParams {
+ return &DeleteCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the delete credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteCredentialsParams struct {
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteCredentialsParams) WithDefaults() *DeleteCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete credentials params
+func (o *DeleteCredentialsParams) WithTimeout(timeout time.Duration) *DeleteCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete credentials params
+func (o *DeleteCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete credentials params
+func (o *DeleteCredentialsParams) WithContext(ctx context.Context) *DeleteCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete credentials params
+func (o *DeleteCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete credentials params
+func (o *DeleteCredentialsParams) WithHTTPClient(client *http.Client) *DeleteCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete credentials params
+func (o *DeleteCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete credentials params
+func (o *DeleteCredentialsParams) WithID(id int64) *DeleteCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete credentials params
+func (o *DeleteCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/delete_credentials_responses.go b/client/credentials/delete_credentials_responses.go
new file mode 100644
index 00000000..32d045e7
--- /dev/null
+++ b/client/credentials/delete_credentials_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteCredentialsReader is a Reader for the DeleteCredentials structure.
+type DeleteCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteCredentialsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteCredentialsDefault creates a DeleteCredentialsDefault with default headers values
+func NewDeleteCredentialsDefault(code int) *DeleteCredentialsDefault {
+ return &DeleteCredentialsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteCredentialsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteCredentialsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete credentials default response has a 2xx status code
+func (o *DeleteCredentialsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete credentials default response has a 3xx status code
+func (o *DeleteCredentialsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete credentials default response has a 4xx status code
+func (o *DeleteCredentialsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete credentials default response has a 5xx status code
+func (o *DeleteCredentialsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete credentials default response a status code equal to that given
+func (o *DeleteCredentialsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete credentials default response
+func (o *DeleteCredentialsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteCredentialsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/credentials/{id}][%d] DeleteCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteCredentialsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/credentials/{id}][%d] DeleteCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteCredentialsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteCredentialsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/delete_gitea_credentials_parameters.go b/client/credentials/delete_gitea_credentials_parameters.go
new file mode 100644
index 00000000..598ac477
--- /dev/null
+++ b/client/credentials/delete_gitea_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteGiteaCredentialsParams creates a new DeleteGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteGiteaCredentialsParams() *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithTimeout creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGiteaCredentialsParamsWithTimeout(timeout time.Duration) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithContext creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewDeleteGiteaCredentialsParamsWithContext(ctx context.Context) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGiteaCredentialsParamsWithHTTPClient creates a new DeleteGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGiteaCredentialsParamsWithHTTPClient(client *http.Client) *DeleteGiteaCredentialsParams {
+ return &DeleteGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the delete gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGiteaCredentialsParams struct {
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaCredentialsParams) WithDefaults() *DeleteGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithTimeout(timeout time.Duration) *DeleteGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithContext(ctx context.Context) *DeleteGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithHTTPClient(client *http.Client) *DeleteGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) WithID(id int64) *DeleteGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete gitea credentials params
+func (o *DeleteGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/delete_gitea_credentials_responses.go b/client/credentials/delete_gitea_credentials_responses.go
new file mode 100644
index 00000000..d1df7b0b
--- /dev/null
+++ b/client/credentials/delete_gitea_credentials_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGiteaCredentialsReader is a Reader for the DeleteGiteaCredentials structure.
+type DeleteGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGiteaCredentialsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGiteaCredentialsDefault creates a DeleteGiteaCredentialsDefault with default headers values
+func NewDeleteGiteaCredentialsDefault(code int) *DeleteGiteaCredentialsDefault {
+ return &DeleteGiteaCredentialsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGiteaCredentialsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGiteaCredentialsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete gitea credentials default response has a 2xx status code
+func (o *DeleteGiteaCredentialsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete gitea credentials default response has a 3xx status code
+func (o *DeleteGiteaCredentialsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete gitea credentials default response has a 4xx status code
+func (o *DeleteGiteaCredentialsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete gitea credentials default response has a 5xx status code
+func (o *DeleteGiteaCredentialsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete gitea credentials default response a status code equal to that given
+func (o *DeleteGiteaCredentialsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete gitea credentials default response
+func (o *DeleteGiteaCredentialsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGiteaCredentialsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/credentials/{id}][%d] DeleteGiteaCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaCredentialsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/credentials/{id}][%d] DeleteGiteaCredentials default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaCredentialsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGiteaCredentialsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/get_credentials_parameters.go b/client/credentials/get_credentials_parameters.go
new file mode 100644
index 00000000..ff8305e8
--- /dev/null
+++ b/client/credentials/get_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetCredentialsParams creates a new GetCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetCredentialsParams() *GetCredentialsParams {
+ return &GetCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetCredentialsParamsWithTimeout creates a new GetCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewGetCredentialsParamsWithTimeout(timeout time.Duration) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetCredentialsParamsWithContext creates a new GetCredentialsParams object
+// with the ability to set a context for a request.
+func NewGetCredentialsParamsWithContext(ctx context.Context) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetCredentialsParamsWithHTTPClient creates a new GetCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetCredentialsParamsWithHTTPClient(client *http.Client) *GetCredentialsParams {
+ return &GetCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the get credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetCredentialsParams struct {
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCredentialsParams) WithDefaults() *GetCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get credentials params
+func (o *GetCredentialsParams) WithTimeout(timeout time.Duration) *GetCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get credentials params
+func (o *GetCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get credentials params
+func (o *GetCredentialsParams) WithContext(ctx context.Context) *GetCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get credentials params
+func (o *GetCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get credentials params
+func (o *GetCredentialsParams) WithHTTPClient(client *http.Client) *GetCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get credentials params
+func (o *GetCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get credentials params
+func (o *GetCredentialsParams) WithID(id int64) *GetCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get credentials params
+func (o *GetCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/get_credentials_responses.go b/client/credentials/get_credentials_responses.go
new file mode 100644
index 00000000..4538c16e
--- /dev/null
+++ b/client/credentials/get_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetCredentialsReader is a Reader for the GetCredentials structure.
+type GetCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /github/credentials/{id}] GetCredentials", response, response.Code())
+ }
+}
+
+// NewGetCredentialsOK creates a GetCredentialsOK with default headers values
+func NewGetCredentialsOK() *GetCredentialsOK {
+ return &GetCredentialsOK{}
+}
+
+/*
+GetCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type GetCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this get credentials o k response has a 2xx status code
+func (o *GetCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get credentials o k response has a 3xx status code
+func (o *GetCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get credentials o k response has a 4xx status code
+func (o *GetCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get credentials o k response has a 5xx status code
+func (o *GetCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get credentials o k response a status code equal to that given
+func (o *GetCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get credentials o k response
+func (o *GetCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *GetCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsOK %s", 200, payload)
+}
+
+func (o *GetCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsOK %s", 200, payload)
+}
+
+func (o *GetCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *GetCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetCredentialsBadRequest creates a GetCredentialsBadRequest with default headers values
+func NewGetCredentialsBadRequest() *GetCredentialsBadRequest {
+ return &GetCredentialsBadRequest{}
+}
+
+/*
+GetCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type GetCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get credentials bad request response has a 2xx status code
+func (o *GetCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get credentials bad request response has a 3xx status code
+func (o *GetCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get credentials bad request response has a 4xx status code
+func (o *GetCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get credentials bad request response has a 5xx status code
+func (o *GetCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get credentials bad request response a status code equal to that given
+func (o *GetCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the get credentials bad request response
+func (o *GetCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *GetCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials/{id}][%d] getCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/get_gitea_credentials_parameters.go b/client/credentials/get_gitea_credentials_parameters.go
new file mode 100644
index 00000000..a844c326
--- /dev/null
+++ b/client/credentials/get_gitea_credentials_parameters.go
@@ -0,0 +1,152 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetGiteaCredentialsParams creates a new GetGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetGiteaCredentialsParams() *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithTimeout creates a new GetGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewGetGiteaCredentialsParamsWithTimeout(timeout time.Duration) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithContext creates a new GetGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewGetGiteaCredentialsParamsWithContext(ctx context.Context) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGiteaCredentialsParamsWithHTTPClient creates a new GetGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGiteaCredentialsParamsWithHTTPClient(client *http.Client) *GetGiteaCredentialsParams {
+ return &GetGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the get gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGiteaCredentialsParams struct {
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaCredentialsParams) WithDefaults() *GetGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithTimeout(timeout time.Duration) *GetGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithContext(ctx context.Context) *GetGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithHTTPClient(client *http.Client) *GetGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) WithID(id int64) *GetGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get gitea credentials params
+func (o *GetGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/get_gitea_credentials_responses.go b/client/credentials/get_gitea_credentials_responses.go
new file mode 100644
index 00000000..ba116d63
--- /dev/null
+++ b/client/credentials/get_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGiteaCredentialsReader is a Reader for the GetGiteaCredentials structure.
+type GetGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /gitea/credentials/{id}] GetGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewGetGiteaCredentialsOK creates a GetGiteaCredentialsOK with default headers values
+func NewGetGiteaCredentialsOK() *GetGiteaCredentialsOK {
+ return &GetGiteaCredentialsOK{}
+}
+
+/*
+GetGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type GetGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this get gitea credentials o k response has a 2xx status code
+func (o *GetGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get gitea credentials o k response has a 3xx status code
+func (o *GetGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get gitea credentials o k response has a 4xx status code
+func (o *GetGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get gitea credentials o k response has a 5xx status code
+func (o *GetGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get gitea credentials o k response a status code equal to that given
+func (o *GetGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get gitea credentials o k response
+func (o *GetGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *GetGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *GetGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *GetGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *GetGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGiteaCredentialsBadRequest creates a GetGiteaCredentialsBadRequest with default headers values
+func NewGetGiteaCredentialsBadRequest() *GetGiteaCredentialsBadRequest {
+ return &GetGiteaCredentialsBadRequest{}
+}
+
+/*
+GetGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type GetGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get gitea credentials bad request response has a 2xx status code
+func (o *GetGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get gitea credentials bad request response has a 3xx status code
+func (o *GetGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get gitea credentials bad request response has a 4xx status code
+func (o *GetGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get gitea credentials bad request response has a 5xx status code
+func (o *GetGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get gitea credentials bad request response a status code equal to that given
+func (o *GetGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the get gitea credentials bad request response
+func (o *GetGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *GetGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials/{id}][%d] getGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *GetGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/list_credentials_parameters.go b/client/credentials/list_credentials_parameters.go
new file mode 100644
index 00000000..fdf839d7
--- /dev/null
+++ b/client/credentials/list_credentials_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListCredentialsParams creates a new ListCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListCredentialsParams() *ListCredentialsParams {
+ return &ListCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListCredentialsParamsWithTimeout creates a new ListCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewListCredentialsParamsWithTimeout(timeout time.Duration) *ListCredentialsParams {
+ return &ListCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListCredentialsParamsWithContext creates a new ListCredentialsParams object
+// with the ability to set a context for a request.
+func NewListCredentialsParamsWithContext(ctx context.Context) *ListCredentialsParams {
+ return &ListCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewListCredentialsParamsWithHTTPClient creates a new ListCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListCredentialsParamsWithHTTPClient(client *http.Client) *ListCredentialsParams {
+ return &ListCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the list credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListCredentialsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListCredentialsParams) WithDefaults() *ListCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list credentials params
+func (o *ListCredentialsParams) WithTimeout(timeout time.Duration) *ListCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list credentials params
+func (o *ListCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list credentials params
+func (o *ListCredentialsParams) WithContext(ctx context.Context) *ListCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list credentials params
+func (o *ListCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list credentials params
+func (o *ListCredentialsParams) WithHTTPClient(client *http.Client) *ListCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list credentials params
+func (o *ListCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/list_credentials_responses.go b/client/credentials/list_credentials_responses.go
new file mode 100644
index 00000000..46163dc9
--- /dev/null
+++ b/client/credentials/list_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListCredentialsReader is a Reader for the ListCredentials structure.
+type ListCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewListCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /github/credentials] ListCredentials", response, response.Code())
+ }
+}
+
+// NewListCredentialsOK creates a ListCredentialsOK with default headers values
+func NewListCredentialsOK() *ListCredentialsOK {
+ return &ListCredentialsOK{}
+}
+
+/*
+ListCredentialsOK describes a response with status code 200, with default header values.
+
+Credentials
+*/
+type ListCredentialsOK struct {
+ Payload garm_params.Credentials
+}
+
+// IsSuccess returns true when this list credentials o k response has a 2xx status code
+func (o *ListCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list credentials o k response has a 3xx status code
+func (o *ListCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list credentials o k response has a 4xx status code
+func (o *ListCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list credentials o k response has a 5xx status code
+func (o *ListCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list credentials o k response a status code equal to that given
+func (o *ListCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list credentials o k response
+func (o *ListCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *ListCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsOK %s", 200, payload)
+}
+
+func (o *ListCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsOK %s", 200, payload)
+}
+
+func (o *ListCredentialsOK) GetPayload() garm_params.Credentials {
+ return o.Payload
+}
+
+func (o *ListCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListCredentialsBadRequest creates a ListCredentialsBadRequest with default headers values
+func NewListCredentialsBadRequest() *ListCredentialsBadRequest {
+ return &ListCredentialsBadRequest{}
+}
+
+/*
+ListCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type ListCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list credentials bad request response has a 2xx status code
+func (o *ListCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this list credentials bad request response has a 3xx status code
+func (o *ListCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list credentials bad request response has a 4xx status code
+func (o *ListCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this list credentials bad request response has a 5xx status code
+func (o *ListCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list credentials bad request response a status code equal to that given
+func (o *ListCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the list credentials bad request response
+func (o *ListCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *ListCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/credentials][%d] listCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/list_gitea_credentials_parameters.go b/client/credentials/list_gitea_credentials_parameters.go
new file mode 100644
index 00000000..5e321a88
--- /dev/null
+++ b/client/credentials/list_gitea_credentials_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGiteaCredentialsParams creates a new ListGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListGiteaCredentialsParams() *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithTimeout creates a new ListGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewListGiteaCredentialsParamsWithTimeout(timeout time.Duration) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithContext creates a new ListGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewListGiteaCredentialsParamsWithContext(ctx context.Context) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGiteaCredentialsParamsWithHTTPClient creates a new ListGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGiteaCredentialsParamsWithHTTPClient(client *http.Client) *ListGiteaCredentialsParams {
+ return &ListGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the list gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGiteaCredentialsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaCredentialsParams) WithDefaults() *ListGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithTimeout(timeout time.Duration) *ListGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithContext(ctx context.Context) *ListGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) WithHTTPClient(client *http.Client) *ListGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list gitea credentials params
+func (o *ListGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/list_gitea_credentials_responses.go b/client/credentials/list_gitea_credentials_responses.go
new file mode 100644
index 00000000..f27864be
--- /dev/null
+++ b/client/credentials/list_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGiteaCredentialsReader is a Reader for the ListGiteaCredentials structure.
+type ListGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewListGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /gitea/credentials] ListGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewListGiteaCredentialsOK creates a ListGiteaCredentialsOK with default headers values
+func NewListGiteaCredentialsOK() *ListGiteaCredentialsOK {
+ return &ListGiteaCredentialsOK{}
+}
+
+/*
+ListGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+Credentials
+*/
+type ListGiteaCredentialsOK struct {
+ Payload garm_params.Credentials
+}
+
+// IsSuccess returns true when this list gitea credentials o k response has a 2xx status code
+func (o *ListGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list gitea credentials o k response has a 3xx status code
+func (o *ListGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list gitea credentials o k response has a 4xx status code
+func (o *ListGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list gitea credentials o k response has a 5xx status code
+func (o *ListGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list gitea credentials o k response a status code equal to that given
+func (o *ListGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list gitea credentials o k response
+func (o *ListGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *ListGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *ListGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *ListGiteaCredentialsOK) GetPayload() garm_params.Credentials {
+ return o.Payload
+}
+
+func (o *ListGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGiteaCredentialsBadRequest creates a ListGiteaCredentialsBadRequest with default headers values
+func NewListGiteaCredentialsBadRequest() *ListGiteaCredentialsBadRequest {
+ return &ListGiteaCredentialsBadRequest{}
+}
+
+/*
+ListGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type ListGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list gitea credentials bad request response has a 2xx status code
+func (o *ListGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this list gitea credentials bad request response has a 3xx status code
+func (o *ListGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list gitea credentials bad request response has a 4xx status code
+func (o *ListGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this list gitea credentials bad request response has a 5xx status code
+func (o *ListGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list gitea credentials bad request response a status code equal to that given
+func (o *ListGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the list gitea credentials bad request response
+func (o *ListGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *ListGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/credentials][%d] listGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *ListGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/update_credentials_parameters.go b/client/credentials/update_credentials_parameters.go
new file mode 100644
index 00000000..bba26c95
--- /dev/null
+++ b/client/credentials/update_credentials_parameters.go
@@ -0,0 +1,174 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateCredentialsParams creates a new UpdateCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateCredentialsParams() *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateCredentialsParamsWithTimeout creates a new UpdateCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewUpdateCredentialsParamsWithTimeout(timeout time.Duration) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateCredentialsParamsWithContext creates a new UpdateCredentialsParams object
+// with the ability to set a context for a request.
+func NewUpdateCredentialsParamsWithContext(ctx context.Context) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateCredentialsParamsWithHTTPClient creates a new UpdateCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateCredentialsParamsWithHTTPClient(client *http.Client) *UpdateCredentialsParams {
+ return &UpdateCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the update credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when updating a GitHub credential.
+ */
+ Body garm_params.UpdateGithubCredentialsParams
+
+ /* ID.
+
+ ID of the GitHub credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateCredentialsParams) WithDefaults() *UpdateCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update credentials params
+func (o *UpdateCredentialsParams) WithTimeout(timeout time.Duration) *UpdateCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update credentials params
+func (o *UpdateCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update credentials params
+func (o *UpdateCredentialsParams) WithContext(ctx context.Context) *UpdateCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update credentials params
+func (o *UpdateCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update credentials params
+func (o *UpdateCredentialsParams) WithHTTPClient(client *http.Client) *UpdateCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update credentials params
+func (o *UpdateCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update credentials params
+func (o *UpdateCredentialsParams) WithBody(body garm_params.UpdateGithubCredentialsParams) *UpdateCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update credentials params
+func (o *UpdateCredentialsParams) SetBody(body garm_params.UpdateGithubCredentialsParams) {
+ o.Body = body
+}
+
+// WithID adds the id to the update credentials params
+func (o *UpdateCredentialsParams) WithID(id int64) *UpdateCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the update credentials params
+func (o *UpdateCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/update_credentials_responses.go b/client/credentials/update_credentials_responses.go
new file mode 100644
index 00000000..6a9f37f8
--- /dev/null
+++ b/client/credentials/update_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateCredentialsReader is a Reader for the UpdateCredentials structure.
+type UpdateCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /github/credentials/{id}] UpdateCredentials", response, response.Code())
+ }
+}
+
+// NewUpdateCredentialsOK creates a UpdateCredentialsOK with default headers values
+func NewUpdateCredentialsOK() *UpdateCredentialsOK {
+ return &UpdateCredentialsOK{}
+}
+
+/*
+UpdateCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type UpdateCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this update credentials o k response has a 2xx status code
+func (o *UpdateCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update credentials o k response has a 3xx status code
+func (o *UpdateCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update credentials o k response has a 4xx status code
+func (o *UpdateCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update credentials o k response has a 5xx status code
+func (o *UpdateCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update credentials o k response a status code equal to that given
+func (o *UpdateCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update credentials o k response
+func (o *UpdateCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *UpdateCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *UpdateCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateCredentialsBadRequest creates a UpdateCredentialsBadRequest with default headers values
+func NewUpdateCredentialsBadRequest() *UpdateCredentialsBadRequest {
+ return &UpdateCredentialsBadRequest{}
+}
+
+/*
+UpdateCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update credentials bad request response has a 2xx status code
+func (o *UpdateCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update credentials bad request response has a 3xx status code
+func (o *UpdateCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update credentials bad request response has a 4xx status code
+func (o *UpdateCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update credentials bad request response has a 5xx status code
+func (o *UpdateCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update credentials bad request response a status code equal to that given
+func (o *UpdateCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update credentials bad request response
+func (o *UpdateCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/credentials/{id}][%d] updateCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/credentials/update_gitea_credentials_parameters.go b/client/credentials/update_gitea_credentials_parameters.go
new file mode 100644
index 00000000..1907a0f2
--- /dev/null
+++ b/client/credentials/update_gitea_credentials_parameters.go
@@ -0,0 +1,174 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGiteaCredentialsParams creates a new UpdateGiteaCredentialsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGiteaCredentialsParams() *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithTimeout creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGiteaCredentialsParamsWithTimeout(timeout time.Duration) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithContext creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a context for a request.
+func NewUpdateGiteaCredentialsParamsWithContext(ctx context.Context) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGiteaCredentialsParamsWithHTTPClient creates a new UpdateGiteaCredentialsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGiteaCredentialsParamsWithHTTPClient(client *http.Client) *UpdateGiteaCredentialsParams {
+ return &UpdateGiteaCredentialsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGiteaCredentialsParams contains all the parameters to send to the API endpoint
+
+ for the update gitea credentials operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGiteaCredentialsParams struct {
+
+ /* Body.
+
+ Parameters used when updating a Gitea credential.
+ */
+ Body garm_params.UpdateGiteaCredentialsParams
+
+ /* ID.
+
+ ID of the Gitea credential.
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaCredentialsParams) WithDefaults() *UpdateGiteaCredentialsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update gitea credentials params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaCredentialsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithTimeout(timeout time.Duration) *UpdateGiteaCredentialsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithContext(ctx context.Context) *UpdateGiteaCredentialsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithHTTPClient(client *http.Client) *UpdateGiteaCredentialsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithBody(body garm_params.UpdateGiteaCredentialsParams) *UpdateGiteaCredentialsParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetBody(body garm_params.UpdateGiteaCredentialsParams) {
+ o.Body = body
+}
+
+// WithID adds the id to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) WithID(id int64) *UpdateGiteaCredentialsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the update gitea credentials params
+func (o *UpdateGiteaCredentialsParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGiteaCredentialsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/credentials/update_gitea_credentials_responses.go b/client/credentials/update_gitea_credentials_responses.go
new file mode 100644
index 00000000..edbb54d8
--- /dev/null
+++ b/client/credentials/update_gitea_credentials_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package credentials
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGiteaCredentialsReader is a Reader for the UpdateGiteaCredentials structure.
+type UpdateGiteaCredentialsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGiteaCredentialsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGiteaCredentialsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewUpdateGiteaCredentialsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[PUT /gitea/credentials/{id}] UpdateGiteaCredentials", response, response.Code())
+ }
+}
+
+// NewUpdateGiteaCredentialsOK creates a UpdateGiteaCredentialsOK with default headers values
+func NewUpdateGiteaCredentialsOK() *UpdateGiteaCredentialsOK {
+ return &UpdateGiteaCredentialsOK{}
+}
+
+/*
+UpdateGiteaCredentialsOK describes a response with status code 200, with default header values.
+
+ForgeCredentials
+*/
+type UpdateGiteaCredentialsOK struct {
+ Payload garm_params.ForgeCredentials
+}
+
+// IsSuccess returns true when this update gitea credentials o k response has a 2xx status code
+func (o *UpdateGiteaCredentialsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update gitea credentials o k response has a 3xx status code
+func (o *UpdateGiteaCredentialsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea credentials o k response has a 4xx status code
+func (o *UpdateGiteaCredentialsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update gitea credentials o k response has a 5xx status code
+func (o *UpdateGiteaCredentialsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea credentials o k response a status code equal to that given
+func (o *UpdateGiteaCredentialsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update gitea credentials o k response
+func (o *UpdateGiteaCredentialsOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGiteaCredentialsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaCredentialsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaCredentialsOK) GetPayload() garm_params.ForgeCredentials {
+ return o.Payload
+}
+
+func (o *UpdateGiteaCredentialsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGiteaCredentialsBadRequest creates a UpdateGiteaCredentialsBadRequest with default headers values
+func NewUpdateGiteaCredentialsBadRequest() *UpdateGiteaCredentialsBadRequest {
+ return &UpdateGiteaCredentialsBadRequest{}
+}
+
+/*
+UpdateGiteaCredentialsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGiteaCredentialsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update gitea credentials bad request response has a 2xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this update gitea credentials bad request response has a 3xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea credentials bad request response has a 4xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this update gitea credentials bad request response has a 5xx status code
+func (o *UpdateGiteaCredentialsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea credentials bad request response a status code equal to that given
+func (o *UpdateGiteaCredentialsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the update gitea credentials bad request response
+func (o *UpdateGiteaCredentialsBadRequest) Code() int {
+ return 400
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/credentials/{id}][%d] updateGiteaCredentialsBadRequest %s", 400, payload)
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGiteaCredentialsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/create_gitea_endpoint_parameters.go b/client/endpoints/create_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..11dfa73f
--- /dev/null
+++ b/client/endpoints/create_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGiteaEndpointParams creates a new CreateGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGiteaEndpointParams() *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithTimeout creates a new CreateGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewCreateGiteaEndpointParamsWithTimeout(timeout time.Duration) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithContext creates a new CreateGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewCreateGiteaEndpointParamsWithContext(ctx context.Context) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGiteaEndpointParamsWithHTTPClient creates a new CreateGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGiteaEndpointParamsWithHTTPClient(client *http.Client) *CreateGiteaEndpointParams {
+ return &CreateGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the create gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGiteaEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when creating a Gitea endpoint.
+ */
+ Body garm_params.CreateGiteaEndpointParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaEndpointParams) WithDefaults() *CreateGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithTimeout(timeout time.Duration) *CreateGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithContext(ctx context.Context) *CreateGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithHTTPClient(client *http.Client) *CreateGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) WithBody(body garm_params.CreateGiteaEndpointParams) *CreateGiteaEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create gitea endpoint params
+func (o *CreateGiteaEndpointParams) SetBody(body garm_params.CreateGiteaEndpointParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/create_gitea_endpoint_responses.go b/client/endpoints/create_gitea_endpoint_responses.go
new file mode 100644
index 00000000..6e99a973
--- /dev/null
+++ b/client/endpoints/create_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGiteaEndpointReader is a Reader for the CreateGiteaEndpoint structure.
+type CreateGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateGiteaEndpointOK creates a CreateGiteaEndpointOK with default headers values
+func NewCreateGiteaEndpointOK() *CreateGiteaEndpointOK {
+ return &CreateGiteaEndpointOK{}
+}
+
+/*
+CreateGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type CreateGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this create gitea endpoint o k response has a 2xx status code
+func (o *CreateGiteaEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create gitea endpoint o k response has a 3xx status code
+func (o *CreateGiteaEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create gitea endpoint o k response has a 4xx status code
+func (o *CreateGiteaEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create gitea endpoint o k response has a 5xx status code
+func (o *CreateGiteaEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create gitea endpoint o k response a status code equal to that given
+func (o *CreateGiteaEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create gitea endpoint o k response
+func (o *CreateGiteaEndpointOK) Code() int {
+ return 200
+}
+
+func (o *CreateGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] createGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] createGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *CreateGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGiteaEndpointDefault creates a CreateGiteaEndpointDefault with default headers values
+func NewCreateGiteaEndpointDefault(code int) *CreateGiteaEndpointDefault {
+ return &CreateGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create gitea endpoint default response has a 2xx status code
+func (o *CreateGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create gitea endpoint default response has a 3xx status code
+func (o *CreateGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create gitea endpoint default response has a 4xx status code
+func (o *CreateGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create gitea endpoint default response has a 5xx status code
+func (o *CreateGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create gitea endpoint default response a status code equal to that given
+func (o *CreateGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create gitea endpoint default response
+func (o *CreateGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] CreateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /gitea/endpoints][%d] CreateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/create_github_endpoint_parameters.go b/client/endpoints/create_github_endpoint_parameters.go
new file mode 100644
index 00000000..030fa167
--- /dev/null
+++ b/client/endpoints/create_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateGithubEndpointParams creates a new CreateGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateGithubEndpointParams() *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithTimeout creates a new CreateGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewCreateGithubEndpointParamsWithTimeout(timeout time.Duration) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithContext creates a new CreateGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewCreateGithubEndpointParamsWithContext(ctx context.Context) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateGithubEndpointParamsWithHTTPClient creates a new CreateGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateGithubEndpointParamsWithHTTPClient(client *http.Client) *CreateGithubEndpointParams {
+ return &CreateGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the create github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateGithubEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when creating a GitHub endpoint.
+ */
+ Body garm_params.CreateGithubEndpointParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGithubEndpointParams) WithDefaults() *CreateGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithTimeout(timeout time.Duration) *CreateGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithContext(ctx context.Context) *CreateGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithHTTPClient(client *http.Client) *CreateGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create github endpoint params
+func (o *CreateGithubEndpointParams) WithBody(body garm_params.CreateGithubEndpointParams) *CreateGithubEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create github endpoint params
+func (o *CreateGithubEndpointParams) SetBody(body garm_params.CreateGithubEndpointParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/create_github_endpoint_responses.go b/client/endpoints/create_github_endpoint_responses.go
new file mode 100644
index 00000000..60961f3a
--- /dev/null
+++ b/client/endpoints/create_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateGithubEndpointReader is a Reader for the CreateGithubEndpoint structure.
+type CreateGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateGithubEndpointOK creates a CreateGithubEndpointOK with default headers values
+func NewCreateGithubEndpointOK() *CreateGithubEndpointOK {
+ return &CreateGithubEndpointOK{}
+}
+
+/*
+CreateGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type CreateGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this create github endpoint o k response has a 2xx status code
+func (o *CreateGithubEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create github endpoint o k response has a 3xx status code
+func (o *CreateGithubEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create github endpoint o k response has a 4xx status code
+func (o *CreateGithubEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create github endpoint o k response has a 5xx status code
+func (o *CreateGithubEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create github endpoint o k response a status code equal to that given
+func (o *CreateGithubEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create github endpoint o k response
+func (o *CreateGithubEndpointOK) Code() int {
+ return 200
+}
+
+func (o *CreateGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] createGithubEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] createGithubEndpointOK %s", 200, payload)
+}
+
+func (o *CreateGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *CreateGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateGithubEndpointDefault creates a CreateGithubEndpointDefault with default headers values
+func NewCreateGithubEndpointDefault(code int) *CreateGithubEndpointDefault {
+ return &CreateGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create github endpoint default response has a 2xx status code
+func (o *CreateGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create github endpoint default response has a 3xx status code
+func (o *CreateGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create github endpoint default response has a 4xx status code
+func (o *CreateGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create github endpoint default response has a 5xx status code
+func (o *CreateGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create github endpoint default response a status code equal to that given
+func (o *CreateGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create github endpoint default response
+func (o *CreateGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] CreateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /github/endpoints][%d] CreateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *CreateGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/delete_gitea_endpoint_parameters.go b/client/endpoints/delete_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..f7ea5a5d
--- /dev/null
+++ b/client/endpoints/delete_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteGiteaEndpointParams creates a new DeleteGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteGiteaEndpointParams() *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithTimeout creates a new DeleteGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGiteaEndpointParamsWithTimeout(timeout time.Duration) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithContext creates a new DeleteGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewDeleteGiteaEndpointParamsWithContext(ctx context.Context) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGiteaEndpointParamsWithHTTPClient creates a new DeleteGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGiteaEndpointParamsWithHTTPClient(client *http.Client) *DeleteGiteaEndpointParams {
+ return &DeleteGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the delete gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGiteaEndpointParams struct {
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaEndpointParams) WithDefaults() *DeleteGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithTimeout(timeout time.Duration) *DeleteGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithContext(ctx context.Context) *DeleteGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithHTTPClient(client *http.Client) *DeleteGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) WithName(name string) *DeleteGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the delete gitea endpoint params
+func (o *DeleteGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/delete_gitea_endpoint_responses.go b/client/endpoints/delete_gitea_endpoint_responses.go
new file mode 100644
index 00000000..787d6585
--- /dev/null
+++ b/client/endpoints/delete_gitea_endpoint_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGiteaEndpointReader is a Reader for the DeleteGiteaEndpoint structure.
+type DeleteGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGiteaEndpointDefault creates a DeleteGiteaEndpointDefault with default headers values
+func NewDeleteGiteaEndpointDefault(code int) *DeleteGiteaEndpointDefault {
+ return &DeleteGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete gitea endpoint default response has a 2xx status code
+func (o *DeleteGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete gitea endpoint default response has a 3xx status code
+func (o *DeleteGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete gitea endpoint default response has a 4xx status code
+func (o *DeleteGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete gitea endpoint default response has a 5xx status code
+func (o *DeleteGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete gitea endpoint default response a status code equal to that given
+func (o *DeleteGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete gitea endpoint default response
+func (o *DeleteGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/endpoints/{name}][%d] DeleteGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /gitea/endpoints/{name}][%d] DeleteGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/delete_github_endpoint_parameters.go b/client/endpoints/delete_github_endpoint_parameters.go
new file mode 100644
index 00000000..a02d4107
--- /dev/null
+++ b/client/endpoints/delete_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteGithubEndpointParams creates a new DeleteGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteGithubEndpointParams() *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithTimeout creates a new DeleteGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewDeleteGithubEndpointParamsWithTimeout(timeout time.Duration) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithContext creates a new DeleteGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewDeleteGithubEndpointParamsWithContext(ctx context.Context) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteGithubEndpointParamsWithHTTPClient creates a new DeleteGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteGithubEndpointParamsWithHTTPClient(client *http.Client) *DeleteGithubEndpointParams {
+ return &DeleteGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the delete github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteGithubEndpointParams struct {
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGithubEndpointParams) WithDefaults() *DeleteGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithTimeout(timeout time.Duration) *DeleteGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithContext(ctx context.Context) *DeleteGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithHTTPClient(client *http.Client) *DeleteGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) WithName(name string) *DeleteGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the delete github endpoint params
+func (o *DeleteGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/delete_github_endpoint_responses.go b/client/endpoints/delete_github_endpoint_responses.go
new file mode 100644
index 00000000..21b3f880
--- /dev/null
+++ b/client/endpoints/delete_github_endpoint_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteGithubEndpointReader is a Reader for the DeleteGithubEndpoint structure.
+type DeleteGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteGithubEndpointDefault creates a DeleteGithubEndpointDefault with default headers values
+func NewDeleteGithubEndpointDefault(code int) *DeleteGithubEndpointDefault {
+ return &DeleteGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete github endpoint default response has a 2xx status code
+func (o *DeleteGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete github endpoint default response has a 3xx status code
+func (o *DeleteGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete github endpoint default response has a 4xx status code
+func (o *DeleteGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete github endpoint default response has a 5xx status code
+func (o *DeleteGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete github endpoint default response a status code equal to that given
+func (o *DeleteGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete github endpoint default response
+func (o *DeleteGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/endpoints/{name}][%d] DeleteGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /github/endpoints/{name}][%d] DeleteGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *DeleteGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/endpoints_client.go b/client/endpoints/endpoints_client.go
new file mode 100644
index 00000000..74019577
--- /dev/null
+++ b/client/endpoints/endpoints_client.go
@@ -0,0 +1,451 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new endpoints API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// New creates a new endpoints API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// New creates a new endpoints API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for endpoints API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateGiteaEndpoint(params *CreateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaEndpointOK, error)
+
+ CreateGithubEndpoint(params *CreateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGithubEndpointOK, error)
+
+ DeleteGiteaEndpoint(params *DeleteGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteGithubEndpoint(params *DeleteGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetGiteaEndpoint(params *GetGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaEndpointOK, error)
+
+ GetGithubEndpoint(params *GetGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGithubEndpointOK, error)
+
+ ListGiteaEndpoints(params *ListGiteaEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaEndpointsOK, error)
+
+ ListGithubEndpoints(params *ListGithubEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGithubEndpointsOK, error)
+
+ UpdateGiteaEndpoint(params *UpdateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaEndpointOK, error)
+
+ UpdateGithubEndpoint(params *UpdateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGithubEndpointOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateGiteaEndpoint creates a gitea endpoint
+*/
+func (a *Client) CreateGiteaEndpoint(params *CreateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGiteaEndpoint",
+ Method: "POST",
+ PathPattern: "/gitea/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateGithubEndpoint creates a git hub endpoint
+*/
+func (a *Client) CreateGithubEndpoint(params *CreateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateGithubEndpoint",
+ Method: "POST",
+ PathPattern: "/github/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+DeleteGiteaEndpoint deletes a gitea endpoint
+*/
+func (a *Client) DeleteGiteaEndpoint(params *DeleteGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGiteaEndpoint",
+ Method: "DELETE",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteGithubEndpoint deletes a git hub endpoint
+*/
+func (a *Client) DeleteGithubEndpoint(params *DeleteGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteGithubEndpoint",
+ Method: "DELETE",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetGiteaEndpoint gets a gitea endpoint
+*/
+func (a *Client) GetGiteaEndpoint(params *GetGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGiteaEndpoint",
+ Method: "GET",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetGithubEndpoint gets a git hub endpoint
+*/
+func (a *Client) GetGithubEndpoint(params *GetGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetGithubEndpoint",
+ Method: "GET",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListGiteaEndpoints lists all gitea endpoints
+*/
+func (a *Client) ListGiteaEndpoints(params *ListGiteaEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGiteaEndpointsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGiteaEndpointsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGiteaEndpoints",
+ Method: "GET",
+ PathPattern: "/gitea/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGiteaEndpointsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGiteaEndpointsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListGiteaEndpointsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListGithubEndpoints lists all git hub endpoints
+*/
+func (a *Client) ListGithubEndpoints(params *ListGithubEndpointsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListGithubEndpointsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListGithubEndpointsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListGithubEndpoints",
+ Method: "GET",
+ PathPattern: "/github/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListGithubEndpointsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListGithubEndpointsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListGithubEndpointsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateGiteaEndpoint updates a gitea endpoint
+*/
+func (a *Client) UpdateGiteaEndpoint(params *UpdateGiteaEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGiteaEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGiteaEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGiteaEndpoint",
+ Method: "PUT",
+ PathPattern: "/gitea/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGiteaEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGiteaEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateGiteaEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateGithubEndpoint updates a git hub endpoint
+*/
+func (a *Client) UpdateGithubEndpoint(params *UpdateGithubEndpointParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateGithubEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateGithubEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateGithubEndpoint",
+ Method: "PUT",
+ PathPattern: "/github/endpoints/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateGithubEndpointReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateGithubEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateGithubEndpointDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/endpoints/get_gitea_endpoint_parameters.go b/client/endpoints/get_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..0d7f883b
--- /dev/null
+++ b/client/endpoints/get_gitea_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetGiteaEndpointParams creates a new GetGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetGiteaEndpointParams() *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithTimeout creates a new GetGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewGetGiteaEndpointParamsWithTimeout(timeout time.Duration) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithContext creates a new GetGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewGetGiteaEndpointParamsWithContext(ctx context.Context) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGiteaEndpointParamsWithHTTPClient creates a new GetGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGiteaEndpointParamsWithHTTPClient(client *http.Client) *GetGiteaEndpointParams {
+ return &GetGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the get gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGiteaEndpointParams struct {
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaEndpointParams) WithDefaults() *GetGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithTimeout(timeout time.Duration) *GetGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithContext(ctx context.Context) *GetGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithHTTPClient(client *http.Client) *GetGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) WithName(name string) *GetGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get gitea endpoint params
+func (o *GetGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/get_gitea_endpoint_responses.go b/client/endpoints/get_gitea_endpoint_responses.go
new file mode 100644
index 00000000..e4bacd03
--- /dev/null
+++ b/client/endpoints/get_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGiteaEndpointReader is a Reader for the GetGiteaEndpoint structure.
+type GetGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetGiteaEndpointOK creates a GetGiteaEndpointOK with default headers values
+func NewGetGiteaEndpointOK() *GetGiteaEndpointOK {
+ return &GetGiteaEndpointOK{}
+}
+
+/*
+GetGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type GetGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this get gitea endpoint o k response has a 2xx status code
+func (o *GetGiteaEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get gitea endpoint o k response has a 3xx status code
+func (o *GetGiteaEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get gitea endpoint o k response has a 4xx status code
+func (o *GetGiteaEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get gitea endpoint o k response has a 5xx status code
+func (o *GetGiteaEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get gitea endpoint o k response a status code equal to that given
+func (o *GetGiteaEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get gitea endpoint o k response
+func (o *GetGiteaEndpointOK) Code() int {
+ return 200
+}
+
+func (o *GetGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] getGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *GetGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] getGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *GetGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *GetGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGiteaEndpointDefault creates a GetGiteaEndpointDefault with default headers values
+func NewGetGiteaEndpointDefault(code int) *GetGiteaEndpointDefault {
+ return &GetGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get gitea endpoint default response has a 2xx status code
+func (o *GetGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get gitea endpoint default response has a 3xx status code
+func (o *GetGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get gitea endpoint default response has a 4xx status code
+func (o *GetGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get gitea endpoint default response has a 5xx status code
+func (o *GetGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get gitea endpoint default response a status code equal to that given
+func (o *GetGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get gitea endpoint default response
+func (o *GetGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] GetGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints/{name}][%d] GetGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/get_github_endpoint_parameters.go b/client/endpoints/get_github_endpoint_parameters.go
new file mode 100644
index 00000000..7bd9ca00
--- /dev/null
+++ b/client/endpoints/get_github_endpoint_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetGithubEndpointParams creates a new GetGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetGithubEndpointParams() *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetGithubEndpointParamsWithTimeout creates a new GetGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewGetGithubEndpointParamsWithTimeout(timeout time.Duration) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetGithubEndpointParamsWithContext creates a new GetGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewGetGithubEndpointParamsWithContext(ctx context.Context) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewGetGithubEndpointParamsWithHTTPClient creates a new GetGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetGithubEndpointParamsWithHTTPClient(client *http.Client) *GetGithubEndpointParams {
+ return &GetGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the get github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetGithubEndpointParams struct {
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGithubEndpointParams) WithDefaults() *GetGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get github endpoint params
+func (o *GetGithubEndpointParams) WithTimeout(timeout time.Duration) *GetGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get github endpoint params
+func (o *GetGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get github endpoint params
+func (o *GetGithubEndpointParams) WithContext(ctx context.Context) *GetGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get github endpoint params
+func (o *GetGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get github endpoint params
+func (o *GetGithubEndpointParams) WithHTTPClient(client *http.Client) *GetGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get github endpoint params
+func (o *GetGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the get github endpoint params
+func (o *GetGithubEndpointParams) WithName(name string) *GetGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get github endpoint params
+func (o *GetGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/get_github_endpoint_responses.go b/client/endpoints/get_github_endpoint_responses.go
new file mode 100644
index 00000000..e2b97a60
--- /dev/null
+++ b/client/endpoints/get_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetGithubEndpointReader is a Reader for the GetGithubEndpoint structure.
+type GetGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetGithubEndpointOK creates a GetGithubEndpointOK with default headers values
+func NewGetGithubEndpointOK() *GetGithubEndpointOK {
+ return &GetGithubEndpointOK{}
+}
+
+/*
+GetGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type GetGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this get github endpoint o k response has a 2xx status code
+func (o *GetGithubEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get github endpoint o k response has a 3xx status code
+func (o *GetGithubEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get github endpoint o k response has a 4xx status code
+func (o *GetGithubEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get github endpoint o k response has a 5xx status code
+func (o *GetGithubEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get github endpoint o k response has a status code equal to that given
+func (o *GetGithubEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get github endpoint o k response
+func (o *GetGithubEndpointOK) Code() int {
+ return 200
+}
+
+func (o *GetGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] getGithubEndpointOK %s", 200, payload)
+}
+
+func (o *GetGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] getGithubEndpointOK %s", 200, payload)
+}
+
+func (o *GetGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *GetGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetGithubEndpointDefault creates a GetGithubEndpointDefault with default headers values
+func NewGetGithubEndpointDefault(code int) *GetGithubEndpointDefault {
+ return &GetGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get github endpoint default response has a 2xx status code
+func (o *GetGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get github endpoint default response has a 3xx status code
+func (o *GetGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get github endpoint default response has a 4xx status code
+func (o *GetGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get github endpoint default response has a 5xx status code
+func (o *GetGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get github endpoint default response has a status code equal to that given
+func (o *GetGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get github endpoint default response
+func (o *GetGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] GetGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints/{name}][%d] GetGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *GetGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/list_gitea_endpoints_parameters.go b/client/endpoints/list_gitea_endpoints_parameters.go
new file mode 100644
index 00000000..93ec6ae6
--- /dev/null
+++ b/client/endpoints/list_gitea_endpoints_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGiteaEndpointsParams creates a new ListGiteaEndpointsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListGiteaEndpointsParams() *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithTimeout creates a new ListGiteaEndpointsParams object
+// with the ability to set a timeout on a request.
+func NewListGiteaEndpointsParamsWithTimeout(timeout time.Duration) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithContext creates a new ListGiteaEndpointsParams object
+// with the ability to set a context for a request.
+func NewListGiteaEndpointsParamsWithContext(ctx context.Context) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGiteaEndpointsParamsWithHTTPClient creates a new ListGiteaEndpointsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGiteaEndpointsParamsWithHTTPClient(client *http.Client) *ListGiteaEndpointsParams {
+ return &ListGiteaEndpointsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGiteaEndpointsParams contains all the parameters to send to the API endpoint
+
+ for the list gitea endpoints operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGiteaEndpointsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list gitea endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaEndpointsParams) WithDefaults() *ListGiteaEndpointsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list gitea endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGiteaEndpointsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithTimeout(timeout time.Duration) *ListGiteaEndpointsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithContext(ctx context.Context) *ListGiteaEndpointsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) WithHTTPClient(client *http.Client) *ListGiteaEndpointsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list gitea endpoints params
+func (o *ListGiteaEndpointsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGiteaEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/list_gitea_endpoints_responses.go b/client/endpoints/list_gitea_endpoints_responses.go
new file mode 100644
index 00000000..0fdd90ec
--- /dev/null
+++ b/client/endpoints/list_gitea_endpoints_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGiteaEndpointsReader is a Reader for the ListGiteaEndpoints structure.
+type ListGiteaEndpointsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListGiteaEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGiteaEndpointsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListGiteaEndpointsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListGiteaEndpointsOK creates a ListGiteaEndpointsOK with default headers values
+func NewListGiteaEndpointsOK() *ListGiteaEndpointsOK {
+ return &ListGiteaEndpointsOK{}
+}
+
+/*
+ListGiteaEndpointsOK describes a response with status code 200, with default header values.
+
+ForgeEndpoints
+*/
+type ListGiteaEndpointsOK struct {
+ Payload garm_params.ForgeEndpoints
+}
+
+// IsSuccess returns true when this list gitea endpoints o k response has a 2xx status code
+func (o *ListGiteaEndpointsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list gitea endpoints o k response has a 3xx status code
+func (o *ListGiteaEndpointsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list gitea endpoints o k response has a 4xx status code
+func (o *ListGiteaEndpointsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list gitea endpoints o k response has a 5xx status code
+func (o *ListGiteaEndpointsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list gitea endpoints o k response has a status code equal to that given
+func (o *ListGiteaEndpointsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list gitea endpoints o k response
+func (o *ListGiteaEndpointsOK) Code() int {
+ return 200
+}
+
+func (o *ListGiteaEndpointsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] listGiteaEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGiteaEndpointsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] listGiteaEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGiteaEndpointsOK) GetPayload() garm_params.ForgeEndpoints {
+ return o.Payload
+}
+
+func (o *ListGiteaEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGiteaEndpointsDefault creates a ListGiteaEndpointsDefault with default headers values
+func NewListGiteaEndpointsDefault(code int) *ListGiteaEndpointsDefault {
+ return &ListGiteaEndpointsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListGiteaEndpointsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListGiteaEndpointsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list gitea endpoints default response has a 2xx status code
+func (o *ListGiteaEndpointsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list gitea endpoints default response has a 3xx status code
+func (o *ListGiteaEndpointsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list gitea endpoints default response has a 4xx status code
+func (o *ListGiteaEndpointsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list gitea endpoints default response has a 5xx status code
+func (o *ListGiteaEndpointsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list gitea endpoints default response has a status code equal to that given
+func (o *ListGiteaEndpointsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list gitea endpoints default response
+func (o *ListGiteaEndpointsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListGiteaEndpointsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] ListGiteaEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGiteaEndpointsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /gitea/endpoints][%d] ListGiteaEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGiteaEndpointsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGiteaEndpointsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/list_github_endpoints_parameters.go b/client/endpoints/list_github_endpoints_parameters.go
new file mode 100644
index 00000000..c002cfe4
--- /dev/null
+++ b/client/endpoints/list_github_endpoints_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListGithubEndpointsParams creates a new ListGithubEndpointsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListGithubEndpointsParams() *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListGithubEndpointsParamsWithTimeout creates a new ListGithubEndpointsParams object
+// with the ability to set a timeout on a request.
+func NewListGithubEndpointsParamsWithTimeout(timeout time.Duration) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListGithubEndpointsParamsWithContext creates a new ListGithubEndpointsParams object
+// with the ability to set a context for a request.
+func NewListGithubEndpointsParamsWithContext(ctx context.Context) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ Context: ctx,
+ }
+}
+
+// NewListGithubEndpointsParamsWithHTTPClient creates a new ListGithubEndpointsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListGithubEndpointsParamsWithHTTPClient(client *http.Client) *ListGithubEndpointsParams {
+ return &ListGithubEndpointsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListGithubEndpointsParams contains all the parameters to send to the API endpoint
+
+ for the list github endpoints operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListGithubEndpointsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list github endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGithubEndpointsParams) WithDefaults() *ListGithubEndpointsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list github endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListGithubEndpointsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithTimeout(timeout time.Duration) *ListGithubEndpointsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithContext(ctx context.Context) *ListGithubEndpointsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list github endpoints params
+func (o *ListGithubEndpointsParams) WithHTTPClient(client *http.Client) *ListGithubEndpointsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list github endpoints params
+func (o *ListGithubEndpointsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListGithubEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/list_github_endpoints_responses.go b/client/endpoints/list_github_endpoints_responses.go
new file mode 100644
index 00000000..33485f9b
--- /dev/null
+++ b/client/endpoints/list_github_endpoints_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListGithubEndpointsReader is a Reader for the ListGithubEndpoints structure.
+type ListGithubEndpointsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListGithubEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListGithubEndpointsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListGithubEndpointsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListGithubEndpointsOK creates a ListGithubEndpointsOK with default headers values
+func NewListGithubEndpointsOK() *ListGithubEndpointsOK {
+ return &ListGithubEndpointsOK{}
+}
+
+/*
+ListGithubEndpointsOK describes a response with status code 200, with default header values.
+
+ForgeEndpoints
+*/
+type ListGithubEndpointsOK struct {
+ Payload garm_params.ForgeEndpoints
+}
+
+// IsSuccess returns true when this list github endpoints o k response has a 2xx status code
+func (o *ListGithubEndpointsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list github endpoints o k response has a 3xx status code
+func (o *ListGithubEndpointsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list github endpoints o k response has a 4xx status code
+func (o *ListGithubEndpointsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list github endpoints o k response has a 5xx status code
+func (o *ListGithubEndpointsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list github endpoints o k response has a status code equal to that given
+func (o *ListGithubEndpointsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list github endpoints o k response
+func (o *ListGithubEndpointsOK) Code() int {
+ return 200
+}
+
+func (o *ListGithubEndpointsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] listGithubEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGithubEndpointsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] listGithubEndpointsOK %s", 200, payload)
+}
+
+func (o *ListGithubEndpointsOK) GetPayload() garm_params.ForgeEndpoints {
+ return o.Payload
+}
+
+func (o *ListGithubEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListGithubEndpointsDefault creates a ListGithubEndpointsDefault with default headers values
+func NewListGithubEndpointsDefault(code int) *ListGithubEndpointsDefault {
+ return &ListGithubEndpointsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListGithubEndpointsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListGithubEndpointsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list github endpoints default response has a 2xx status code
+func (o *ListGithubEndpointsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list github endpoints default response has a 3xx status code
+func (o *ListGithubEndpointsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list github endpoints default response has a 4xx status code
+func (o *ListGithubEndpointsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list github endpoints default response has a 5xx status code
+func (o *ListGithubEndpointsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list github endpoints default response has a status code equal to that given
+func (o *ListGithubEndpointsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list github endpoints default response
+func (o *ListGithubEndpointsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListGithubEndpointsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] ListGithubEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGithubEndpointsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /github/endpoints][%d] ListGithubEndpoints default %s", o._statusCode, payload)
+}
+
+func (o *ListGithubEndpointsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListGithubEndpointsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/update_gitea_endpoint_parameters.go b/client/endpoints/update_gitea_endpoint_parameters.go
new file mode 100644
index 00000000..bfd18e2e
--- /dev/null
+++ b/client/endpoints/update_gitea_endpoint_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGiteaEndpointParams creates a new UpdateGiteaEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGiteaEndpointParams() *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithTimeout creates a new UpdateGiteaEndpointParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGiteaEndpointParamsWithTimeout(timeout time.Duration) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithContext creates a new UpdateGiteaEndpointParams object
+// with the ability to set a context for a request.
+func NewUpdateGiteaEndpointParamsWithContext(ctx context.Context) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGiteaEndpointParamsWithHTTPClient creates a new UpdateGiteaEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGiteaEndpointParamsWithHTTPClient(client *http.Client) *UpdateGiteaEndpointParams {
+ return &UpdateGiteaEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGiteaEndpointParams contains all the parameters to send to the API endpoint
+
+ for the update gitea endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGiteaEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when updating a Gitea endpoint.
+ */
+ Body garm_params.UpdateGiteaEndpointParams
+
+ /* Name.
+
+ The name of the Gitea endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaEndpointParams) WithDefaults() *UpdateGiteaEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update gitea endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGiteaEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithTimeout(timeout time.Duration) *UpdateGiteaEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithContext(ctx context.Context) *UpdateGiteaEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithHTTPClient(client *http.Client) *UpdateGiteaEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithBody(body garm_params.UpdateGiteaEndpointParams) *UpdateGiteaEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetBody(body garm_params.UpdateGiteaEndpointParams) {
+ o.Body = body
+}
+
+// WithName adds the name to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) WithName(name string) *UpdateGiteaEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the update gitea endpoint params
+func (o *UpdateGiteaEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGiteaEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/update_gitea_endpoint_responses.go b/client/endpoints/update_gitea_endpoint_responses.go
new file mode 100644
index 00000000..052f45fa
--- /dev/null
+++ b/client/endpoints/update_gitea_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGiteaEndpointReader is a Reader for the UpdateGiteaEndpoint structure.
+type UpdateGiteaEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGiteaEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGiteaEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateGiteaEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateGiteaEndpointOK creates a UpdateGiteaEndpointOK with default headers values
+func NewUpdateGiteaEndpointOK() *UpdateGiteaEndpointOK {
+ return &UpdateGiteaEndpointOK{}
+}
+
+/*
+UpdateGiteaEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type UpdateGiteaEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this update gitea endpoint o k response has a 2xx status code
+func (o *UpdateGiteaEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update gitea endpoint o k response has a 3xx status code
+func (o *UpdateGiteaEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update gitea endpoint o k response has a 4xx status code
+func (o *UpdateGiteaEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update gitea endpoint o k response has a 5xx status code
+func (o *UpdateGiteaEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update gitea endpoint o k response a status code equal to that given
+func (o *UpdateGiteaEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update gitea endpoint o k response
+func (o *UpdateGiteaEndpointOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGiteaEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] updateGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] updateGiteaEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGiteaEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *UpdateGiteaEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGiteaEndpointDefault creates a UpdateGiteaEndpointDefault with default headers values
+func NewUpdateGiteaEndpointDefault(code int) *UpdateGiteaEndpointDefault {
+ return &UpdateGiteaEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateGiteaEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGiteaEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update gitea endpoint default response has a 2xx status code
+func (o *UpdateGiteaEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update gitea endpoint default response has a 3xx status code
+func (o *UpdateGiteaEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update gitea endpoint default response has a 4xx status code
+func (o *UpdateGiteaEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update gitea endpoint default response has a 5xx status code
+func (o *UpdateGiteaEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update gitea endpoint default response a status code equal to that given
+func (o *UpdateGiteaEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update gitea endpoint default response
+func (o *UpdateGiteaEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateGiteaEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] UpdateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGiteaEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /gitea/endpoints/{name}][%d] UpdateGiteaEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGiteaEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGiteaEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/endpoints/update_github_endpoint_parameters.go b/client/endpoints/update_github_endpoint_parameters.go
new file mode 100644
index 00000000..35ee713a
--- /dev/null
+++ b/client/endpoints/update_github_endpoint_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateGithubEndpointParams creates a new UpdateGithubEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateGithubEndpointParams() *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithTimeout creates a new UpdateGithubEndpointParams object
+// with the ability to set a timeout on a request.
+func NewUpdateGithubEndpointParamsWithTimeout(timeout time.Duration) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithContext creates a new UpdateGithubEndpointParams object
+// with the ability to set a context for a request.
+func NewUpdateGithubEndpointParamsWithContext(ctx context.Context) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateGithubEndpointParamsWithHTTPClient creates a new UpdateGithubEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateGithubEndpointParamsWithHTTPClient(client *http.Client) *UpdateGithubEndpointParams {
+ return &UpdateGithubEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateGithubEndpointParams contains all the parameters to send to the API endpoint
+
+ for the update github endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateGithubEndpointParams struct {
+
+ /* Body.
+
+ Parameters used when updating a GitHub endpoint.
+ */
+ Body garm_params.UpdateGithubEndpointParams
+
+ /* Name.
+
+ The name of the GitHub endpoint.
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGithubEndpointParams) WithDefaults() *UpdateGithubEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update github endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateGithubEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithTimeout(timeout time.Duration) *UpdateGithubEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithContext(ctx context.Context) *UpdateGithubEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithHTTPClient(client *http.Client) *UpdateGithubEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithBody(body garm_params.UpdateGithubEndpointParams) *UpdateGithubEndpointParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetBody(body garm_params.UpdateGithubEndpointParams) {
+ o.Body = body
+}
+
+// WithName adds the name to the update github endpoint params
+func (o *UpdateGithubEndpointParams) WithName(name string) *UpdateGithubEndpointParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the update github endpoint params
+func (o *UpdateGithubEndpointParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateGithubEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/endpoints/update_github_endpoint_responses.go b/client/endpoints/update_github_endpoint_responses.go
new file mode 100644
index 00000000..27cd4a71
--- /dev/null
+++ b/client/endpoints/update_github_endpoint_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package endpoints
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateGithubEndpointReader is a Reader for the UpdateGithubEndpoint structure.
+type UpdateGithubEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateGithubEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateGithubEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateGithubEndpointDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateGithubEndpointOK creates a UpdateGithubEndpointOK with default headers values
+func NewUpdateGithubEndpointOK() *UpdateGithubEndpointOK {
+ return &UpdateGithubEndpointOK{}
+}
+
+/*
+UpdateGithubEndpointOK describes a response with status code 200, with default header values.
+
+ForgeEndpoint
+*/
+type UpdateGithubEndpointOK struct {
+ Payload garm_params.ForgeEndpoint
+}
+
+// IsSuccess returns true when this update github endpoint o k response has a 2xx status code
+func (o *UpdateGithubEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update github endpoint o k response has a 3xx status code
+func (o *UpdateGithubEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update github endpoint o k response has a 4xx status code
+func (o *UpdateGithubEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update github endpoint o k response has a 5xx status code
+func (o *UpdateGithubEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update github endpoint o k response a status code equal to that given
+func (o *UpdateGithubEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update github endpoint o k response
+func (o *UpdateGithubEndpointOK) Code() int {
+ return 200
+}
+
+func (o *UpdateGithubEndpointOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] updateGithubEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGithubEndpointOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] updateGithubEndpointOK %s", 200, payload)
+}
+
+func (o *UpdateGithubEndpointOK) GetPayload() garm_params.ForgeEndpoint {
+ return o.Payload
+}
+
+func (o *UpdateGithubEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateGithubEndpointDefault creates a UpdateGithubEndpointDefault with default headers values
+func NewUpdateGithubEndpointDefault(code int) *UpdateGithubEndpointDefault {
+ return &UpdateGithubEndpointDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateGithubEndpointDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateGithubEndpointDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update github endpoint default response has a 2xx status code
+func (o *UpdateGithubEndpointDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update github endpoint default response has a 3xx status code
+func (o *UpdateGithubEndpointDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update github endpoint default response has a 4xx status code
+func (o *UpdateGithubEndpointDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update github endpoint default response has a 5xx status code
+func (o *UpdateGithubEndpointDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update github endpoint default response a status code equal to that given
+func (o *UpdateGithubEndpointDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update github endpoint default response
+func (o *UpdateGithubEndpointDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateGithubEndpointDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] UpdateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGithubEndpointDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /github/endpoints/{name}][%d] UpdateGithubEndpoint default %s", o._statusCode, payload)
+}
+
+func (o *UpdateGithubEndpointDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateGithubEndpointDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_parameters.go b/client/enterprises/create_enterprise_parameters.go
new file mode 100644
index 00000000..9b62264a
--- /dev/null
+++ b/client/enterprises/create_enterprise_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateEnterpriseParams creates a new CreateEnterpriseParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateEnterpriseParams() *CreateEnterpriseParams {
+ return &CreateEnterpriseParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateEnterpriseParamsWithTimeout creates a new CreateEnterpriseParams object
+// with the ability to set a timeout on a request.
+func NewCreateEnterpriseParamsWithTimeout(timeout time.Duration) *CreateEnterpriseParams {
+ return &CreateEnterpriseParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateEnterpriseParamsWithContext creates a new CreateEnterpriseParams object
+// with the ability to set a context for a request.
+func NewCreateEnterpriseParamsWithContext(ctx context.Context) *CreateEnterpriseParams {
+ return &CreateEnterpriseParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateEnterpriseParamsWithHTTPClient creates a new CreateEnterpriseParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateEnterpriseParamsWithHTTPClient(client *http.Client) *CreateEnterpriseParams {
+ return &CreateEnterpriseParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateEnterpriseParams contains all the parameters to send to the API endpoint
+
+ for the create enterprise operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateEnterpriseParams struct {
+
+ /* Body.
+
+ Parameters used to create the enterprise.
+ */
+ Body garm_params.CreateEnterpriseParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseParams) WithDefaults() *CreateEnterpriseParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create enterprise params
+func (o *CreateEnterpriseParams) WithTimeout(timeout time.Duration) *CreateEnterpriseParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create enterprise params
+func (o *CreateEnterpriseParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create enterprise params
+func (o *CreateEnterpriseParams) WithContext(ctx context.Context) *CreateEnterpriseParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create enterprise params
+func (o *CreateEnterpriseParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create enterprise params
+func (o *CreateEnterpriseParams) WithHTTPClient(client *http.Client) *CreateEnterpriseParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create enterprise params
+func (o *CreateEnterpriseParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create enterprise params
+func (o *CreateEnterpriseParams) WithBody(body garm_params.CreateEnterpriseParams) *CreateEnterpriseParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create enterprise params
+func (o *CreateEnterpriseParams) SetBody(body garm_params.CreateEnterpriseParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateEnterpriseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_pool_parameters.go b/client/enterprises/create_enterprise_pool_parameters.go
new file mode 100644
index 00000000..348d080e
--- /dev/null
+++ b/client/enterprises/create_enterprise_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateEnterprisePoolParams creates a new CreateEnterprisePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateEnterprisePoolParams() *CreateEnterprisePoolParams {
+ return &CreateEnterprisePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateEnterprisePoolParamsWithTimeout creates a new CreateEnterprisePoolParams object
+// with the ability to set a timeout on a request.
+func NewCreateEnterprisePoolParamsWithTimeout(timeout time.Duration) *CreateEnterprisePoolParams {
+ return &CreateEnterprisePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateEnterprisePoolParamsWithContext creates a new CreateEnterprisePoolParams object
+// with the ability to set a context for a request.
+func NewCreateEnterprisePoolParamsWithContext(ctx context.Context) *CreateEnterprisePoolParams {
+ return &CreateEnterprisePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateEnterprisePoolParamsWithHTTPClient creates a new CreateEnterprisePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateEnterprisePoolParamsWithHTTPClient(client *http.Client) *CreateEnterprisePoolParams {
+ return &CreateEnterprisePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateEnterprisePoolParams contains all the parameters to send to the API endpoint
+
+ for the create enterprise pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateEnterprisePoolParams struct {
+
+ /* Body.
+
+ Parameters used when creating the enterprise pool.
+ */
+ Body garm_params.CreatePoolParams
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterprisePoolParams) WithDefaults() *CreateEnterprisePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterprisePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) WithTimeout(timeout time.Duration) *CreateEnterprisePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) WithContext(ctx context.Context) *CreateEnterprisePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) WithHTTPClient(client *http.Client) *CreateEnterprisePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) WithBody(body garm_params.CreatePoolParams) *CreateEnterprisePoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) SetBody(body garm_params.CreatePoolParams) {
+ o.Body = body
+}
+
+// WithEnterpriseID adds the enterpriseID to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) WithEnterpriseID(enterpriseID string) *CreateEnterprisePoolParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the create enterprise pool params
+func (o *CreateEnterprisePoolParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateEnterprisePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_pool_responses.go b/client/enterprises/create_enterprise_pool_responses.go
new file mode 100644
index 00000000..38331fbc
--- /dev/null
+++ b/client/enterprises/create_enterprise_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateEnterprisePoolReader is a Reader for the CreateEnterprisePool structure.
+type CreateEnterprisePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateEnterprisePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateEnterprisePoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateEnterprisePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateEnterprisePoolOK creates a CreateEnterprisePoolOK with default headers values
+func NewCreateEnterprisePoolOK() *CreateEnterprisePoolOK {
+ return &CreateEnterprisePoolOK{}
+}
+
+/*
+CreateEnterprisePoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type CreateEnterprisePoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this create enterprise pool o k response has a 2xx status code
+func (o *CreateEnterprisePoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create enterprise pool o k response has a 3xx status code
+func (o *CreateEnterprisePoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create enterprise pool o k response has a 4xx status code
+func (o *CreateEnterprisePoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create enterprise pool o k response has a 5xx status code
+func (o *CreateEnterprisePoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create enterprise pool o k response a status code equal to that given
+func (o *CreateEnterprisePoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create enterprise pool o k response
+func (o *CreateEnterprisePoolOK) Code() int {
+ return 200
+}
+
+func (o *CreateEnterprisePoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *CreateEnterprisePoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] createEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *CreateEnterprisePoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *CreateEnterprisePoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateEnterprisePoolDefault creates a CreateEnterprisePoolDefault with default headers values
+func NewCreateEnterprisePoolDefault(code int) *CreateEnterprisePoolDefault {
+ return &CreateEnterprisePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateEnterprisePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateEnterprisePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create enterprise pool default response has a 2xx status code
+func (o *CreateEnterprisePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create enterprise pool default response has a 3xx status code
+func (o *CreateEnterprisePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create enterprise pool default response has a 4xx status code
+func (o *CreateEnterprisePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create enterprise pool default response has a 5xx status code
+func (o *CreateEnterprisePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create enterprise pool default response a status code equal to that given
+func (o *CreateEnterprisePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create enterprise pool default response
+func (o *CreateEnterprisePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateEnterprisePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterprisePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/pools][%d] CreateEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateEnterprisePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_responses.go b/client/enterprises/create_enterprise_responses.go
new file mode 100644
index 00000000..6623ad22
--- /dev/null
+++ b/client/enterprises/create_enterprise_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateEnterpriseReader is a Reader for the CreateEnterprise structure.
+type CreateEnterpriseReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateEnterpriseReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateEnterpriseOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateEnterpriseDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateEnterpriseOK creates a CreateEnterpriseOK with default headers values
+func NewCreateEnterpriseOK() *CreateEnterpriseOK {
+ return &CreateEnterpriseOK{}
+}
+
+/*
+CreateEnterpriseOK describes a response with status code 200, with default header values.
+
+Enterprise
+*/
+type CreateEnterpriseOK struct {
+ Payload garm_params.Enterprise
+}
+
+// IsSuccess returns true when this create enterprise o k response has a 2xx status code
+func (o *CreateEnterpriseOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create enterprise o k response has a 3xx status code
+func (o *CreateEnterpriseOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create enterprise o k response has a 4xx status code
+func (o *CreateEnterpriseOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create enterprise o k response has a 5xx status code
+func (o *CreateEnterpriseOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create enterprise o k response a status code equal to that given
+func (o *CreateEnterpriseOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create enterprise o k response
+func (o *CreateEnterpriseOK) Code() int {
+ return 200
+}
+
+func (o *CreateEnterpriseOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] createEnterpriseOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseOK) GetPayload() garm_params.Enterprise {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateEnterpriseDefault creates a CreateEnterpriseDefault with default headers values
+func NewCreateEnterpriseDefault(code int) *CreateEnterpriseDefault {
+ return &CreateEnterpriseDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateEnterpriseDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateEnterpriseDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create enterprise default response has a 2xx status code
+func (o *CreateEnterpriseDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create enterprise default response has a 3xx status code
+func (o *CreateEnterpriseDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create enterprise default response has a 4xx status code
+func (o *CreateEnterpriseDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create enterprise default response has a 5xx status code
+func (o *CreateEnterpriseDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create enterprise default response a status code equal to that given
+func (o *CreateEnterpriseDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create enterprise default response
+func (o *CreateEnterpriseDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateEnterpriseDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises][%d] CreateEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_scale_set_parameters.go b/client/enterprises/create_enterprise_scale_set_parameters.go
new file mode 100644
index 00000000..76fe13ec
--- /dev/null
+++ b/client/enterprises/create_enterprise_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateEnterpriseScaleSetParams creates a new CreateEnterpriseScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateEnterpriseScaleSetParams() *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithTimeout creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateEnterpriseScaleSetParamsWithTimeout(timeout time.Duration) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithContext creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateEnterpriseScaleSetParamsWithContext(ctx context.Context) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateEnterpriseScaleSetParamsWithHTTPClient creates a new CreateEnterpriseScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateEnterpriseScaleSetParamsWithHTTPClient(client *http.Client) *CreateEnterpriseScaleSetParams {
+ return &CreateEnterpriseScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateEnterpriseScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create enterprise scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateEnterpriseScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the enterprise scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create enterprise scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseScaleSetParams) WithDefaults() *CreateEnterpriseScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create enterprise scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateEnterpriseScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithTimeout(timeout time.Duration) *CreateEnterpriseScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithContext(ctx context.Context) *CreateEnterpriseScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithHTTPClient(client *http.Client) *CreateEnterpriseScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateEnterpriseScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithEnterpriseID adds the enterpriseID to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) WithEnterpriseID(enterpriseID string) *CreateEnterpriseScaleSetParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the create enterprise scale set params
+func (o *CreateEnterpriseScaleSetParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateEnterpriseScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/create_enterprise_scale_set_responses.go b/client/enterprises/create_enterprise_scale_set_responses.go
new file mode 100644
index 00000000..46107fc3
--- /dev/null
+++ b/client/enterprises/create_enterprise_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateEnterpriseScaleSetReader is a Reader for the CreateEnterpriseScaleSet structure.
+type CreateEnterpriseScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateEnterpriseScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateEnterpriseScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateEnterpriseScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateEnterpriseScaleSetOK creates a CreateEnterpriseScaleSetOK with default headers values
+func NewCreateEnterpriseScaleSetOK() *CreateEnterpriseScaleSetOK {
+ return &CreateEnterpriseScaleSetOK{}
+}
+
+/*
+CreateEnterpriseScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateEnterpriseScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create enterprise scale set o k response has a 2xx status code
+func (o *CreateEnterpriseScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create enterprise scale set o k response has a 3xx status code
+func (o *CreateEnterpriseScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create enterprise scale set o k response has a 4xx status code
+func (o *CreateEnterpriseScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create enterprise scale set o k response has a 5xx status code
+func (o *CreateEnterpriseScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create enterprise scale set o k response a status code equal to that given
+func (o *CreateEnterpriseScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create enterprise scale set o k response
+func (o *CreateEnterpriseScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateEnterpriseScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] createEnterpriseScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] createEnterpriseScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateEnterpriseScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateEnterpriseScaleSetDefault creates a CreateEnterpriseScaleSetDefault with default headers values
+func NewCreateEnterpriseScaleSetDefault(code int) *CreateEnterpriseScaleSetDefault {
+ return &CreateEnterpriseScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateEnterpriseScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateEnterpriseScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create enterprise scale set default response has a 2xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create enterprise scale set default response has a 3xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create enterprise scale set default response has a 4xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create enterprise scale set default response has a 5xx status code
+func (o *CreateEnterpriseScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create enterprise scale set default response a status code equal to that given
+func (o *CreateEnterpriseScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create enterprise scale set default response
+func (o *CreateEnterpriseScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateEnterpriseScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] CreateEnterpriseScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /enterprises/{enterpriseID}/scalesets][%d] CreateEnterpriseScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateEnterpriseScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateEnterpriseScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/delete_enterprise_parameters.go b/client/enterprises/delete_enterprise_parameters.go
new file mode 100644
index 00000000..5b6b7e5e
--- /dev/null
+++ b/client/enterprises/delete_enterprise_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteEnterpriseParams creates a new DeleteEnterpriseParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteEnterpriseParams() *DeleteEnterpriseParams {
+ return &DeleteEnterpriseParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteEnterpriseParamsWithTimeout creates a new DeleteEnterpriseParams object
+// with the ability to set a timeout on a request.
+func NewDeleteEnterpriseParamsWithTimeout(timeout time.Duration) *DeleteEnterpriseParams {
+ return &DeleteEnterpriseParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteEnterpriseParamsWithContext creates a new DeleteEnterpriseParams object
+// with the ability to set a context for a request.
+func NewDeleteEnterpriseParamsWithContext(ctx context.Context) *DeleteEnterpriseParams {
+ return &DeleteEnterpriseParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteEnterpriseParamsWithHTTPClient creates a new DeleteEnterpriseParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteEnterpriseParamsWithHTTPClient(client *http.Client) *DeleteEnterpriseParams {
+ return &DeleteEnterpriseParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteEnterpriseParams contains all the parameters to send to the API endpoint
+
+ for the delete enterprise operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteEnterpriseParams struct {
+
+ /* EnterpriseID.
+
+ ID of the enterprise to delete.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEnterpriseParams) WithDefaults() *DeleteEnterpriseParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEnterpriseParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete enterprise params
+func (o *DeleteEnterpriseParams) WithTimeout(timeout time.Duration) *DeleteEnterpriseParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete enterprise params
+func (o *DeleteEnterpriseParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete enterprise params
+func (o *DeleteEnterpriseParams) WithContext(ctx context.Context) *DeleteEnterpriseParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete enterprise params
+func (o *DeleteEnterpriseParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete enterprise params
+func (o *DeleteEnterpriseParams) WithHTTPClient(client *http.Client) *DeleteEnterpriseParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete enterprise params
+func (o *DeleteEnterpriseParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the delete enterprise params
+func (o *DeleteEnterpriseParams) WithEnterpriseID(enterpriseID string) *DeleteEnterpriseParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the delete enterprise params
+func (o *DeleteEnterpriseParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteEnterpriseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/delete_enterprise_pool_parameters.go b/client/enterprises/delete_enterprise_pool_parameters.go
new file mode 100644
index 00000000..bfb7d875
--- /dev/null
+++ b/client/enterprises/delete_enterprise_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteEnterprisePoolParams creates a new DeleteEnterprisePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteEnterprisePoolParams() *DeleteEnterprisePoolParams {
+ return &DeleteEnterprisePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteEnterprisePoolParamsWithTimeout creates a new DeleteEnterprisePoolParams object
+// with the ability to set a timeout on a request.
+func NewDeleteEnterprisePoolParamsWithTimeout(timeout time.Duration) *DeleteEnterprisePoolParams {
+ return &DeleteEnterprisePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteEnterprisePoolParamsWithContext creates a new DeleteEnterprisePoolParams object
+// with the ability to set a context for a request.
+func NewDeleteEnterprisePoolParamsWithContext(ctx context.Context) *DeleteEnterprisePoolParams {
+ return &DeleteEnterprisePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteEnterprisePoolParamsWithHTTPClient creates a new DeleteEnterprisePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteEnterprisePoolParamsWithHTTPClient(client *http.Client) *DeleteEnterprisePoolParams {
+ return &DeleteEnterprisePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteEnterprisePoolParams contains all the parameters to send to the API endpoint
+
+ for the delete enterprise pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteEnterprisePoolParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ /* PoolID.
+
+ ID of the enterprise pool to delete.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEnterprisePoolParams) WithDefaults() *DeleteEnterprisePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEnterprisePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) WithTimeout(timeout time.Duration) *DeleteEnterprisePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) WithContext(ctx context.Context) *DeleteEnterprisePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) WithHTTPClient(client *http.Client) *DeleteEnterprisePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) WithEnterpriseID(enterpriseID string) *DeleteEnterprisePoolParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WithPoolID adds the poolID to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) WithPoolID(poolID string) *DeleteEnterprisePoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the delete enterprise pool params
+func (o *DeleteEnterprisePoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteEnterprisePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/delete_enterprise_pool_responses.go b/client/enterprises/delete_enterprise_pool_responses.go
new file mode 100644
index 00000000..88de90b8
--- /dev/null
+++ b/client/enterprises/delete_enterprise_pool_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteEnterprisePoolReader is a Reader for the DeleteEnterprisePool structure.
+type DeleteEnterprisePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteEnterprisePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteEnterprisePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteEnterprisePoolDefault creates a DeleteEnterprisePoolDefault with default headers values
+func NewDeleteEnterprisePoolDefault(code int) *DeleteEnterprisePoolDefault {
+ return &DeleteEnterprisePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteEnterprisePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteEnterprisePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete enterprise pool default response has a 2xx status code
+func (o *DeleteEnterprisePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete enterprise pool default response has a 3xx status code
+func (o *DeleteEnterprisePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete enterprise pool default response has a 4xx status code
+func (o *DeleteEnterprisePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete enterprise pool default response has a 5xx status code
+func (o *DeleteEnterprisePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete enterprise pool default response has a status code equal to that given
+func (o *DeleteEnterprisePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete enterprise pool default response
+func (o *DeleteEnterprisePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteEnterprisePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteEnterprisePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}/pools/{poolID}][%d] DeleteEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteEnterprisePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/delete_enterprise_responses.go b/client/enterprises/delete_enterprise_responses.go
new file mode 100644
index 00000000..097f8983
--- /dev/null
+++ b/client/enterprises/delete_enterprise_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteEnterpriseReader is a Reader for the DeleteEnterprise structure.
+type DeleteEnterpriseReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteEnterpriseReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteEnterpriseDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteEnterpriseDefault creates a DeleteEnterpriseDefault with default headers values
+func NewDeleteEnterpriseDefault(code int) *DeleteEnterpriseDefault {
+ return &DeleteEnterpriseDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteEnterpriseDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteEnterpriseDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete enterprise default response has a 2xx status code
+func (o *DeleteEnterpriseDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete enterprise default response has a 3xx status code
+func (o *DeleteEnterpriseDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete enterprise default response has a 4xx status code
+func (o *DeleteEnterpriseDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete enterprise default response has a 5xx status code
+func (o *DeleteEnterpriseDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete enterprise default response has a status code equal to that given
+func (o *DeleteEnterpriseDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete enterprise default response
+func (o *DeleteEnterpriseDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteEnterpriseDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *DeleteEnterpriseDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /enterprises/{enterpriseID}][%d] DeleteEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *DeleteEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteEnterpriseDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/enterprises_client.go b/client/enterprises/enterprises_client.go
new file mode 100644
index 00000000..0014ca96
--- /dev/null
+++ b/client/enterprises/enterprises_client.go
@@ -0,0 +1,571 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new enterprises API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new enterprises API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new enterprises API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for enterprises API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateEnterprise(params *CreateEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseOK, error)
+
+ CreateEnterprisePool(params *CreateEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterprisePoolOK, error)
+
+ CreateEnterpriseScaleSet(params *CreateEnterpriseScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseScaleSetOK, error)
+
+ DeleteEnterprise(params *DeleteEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteEnterprisePool(params *DeleteEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetEnterprise(params *GetEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetEnterpriseOK, error)
+
+ GetEnterprisePool(params *GetEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetEnterprisePoolOK, error)
+
+ ListEnterpriseInstances(params *ListEnterpriseInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseInstancesOK, error)
+
+ ListEnterprisePools(params *ListEnterprisePoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisePoolsOK, error)
+
+ ListEnterpriseScaleSets(params *ListEnterpriseScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseScaleSetsOK, error)
+
+ ListEnterprises(params *ListEnterprisesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisesOK, error)
+
+ UpdateEnterprise(params *UpdateEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateEnterpriseOK, error)
+
+ UpdateEnterprisePool(params *UpdateEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateEnterprisePoolOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateEnterprise creates enterprise with the given parameters
+*/
+func (a *Client) CreateEnterprise(params *CreateEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateEnterpriseParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateEnterprise",
+ Method: "POST",
+ PathPattern: "/enterprises",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateEnterpriseReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateEnterpriseOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateEnterpriseDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateEnterprisePool creates enterprise pool with the parameters given
+*/
+func (a *Client) CreateEnterprisePool(params *CreateEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterprisePoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateEnterprisePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateEnterprisePool",
+ Method: "POST",
+ PathPattern: "/enterprises/{enterpriseID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateEnterprisePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateEnterprisePoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateEnterprisePoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateEnterpriseScaleSet creates enterprise scale set with the parameters given
+*/
+func (a *Client) CreateEnterpriseScaleSet(params *CreateEnterpriseScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnterpriseScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateEnterpriseScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateEnterpriseScaleSet",
+ Method: "POST",
+ PathPattern: "/enterprises/{enterpriseID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateEnterpriseScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateEnterpriseScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateEnterpriseScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+DeleteEnterprise deletes enterprise by ID
+*/
+func (a *Client) DeleteEnterprise(params *DeleteEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteEnterpriseParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteEnterprise",
+ Method: "DELETE",
+ PathPattern: "/enterprises/{enterpriseID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteEnterpriseReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteEnterprisePool deletes enterprise pool by ID
+*/
+func (a *Client) DeleteEnterprisePool(params *DeleteEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteEnterprisePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteEnterprisePool",
+ Method: "DELETE",
+ PathPattern: "/enterprises/{enterpriseID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteEnterprisePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetEnterprise gets enterprise by ID
+*/
+func (a *Client) GetEnterprise(params *GetEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetEnterpriseOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEnterpriseParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEnterprise",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEnterpriseReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEnterpriseOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetEnterpriseDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetEnterprisePool gets enterprise pool by ID
+*/
+func (a *Client) GetEnterprisePool(params *GetEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetEnterprisePoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEnterprisePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEnterprisePool",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEnterprisePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEnterprisePoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetEnterprisePoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListEnterpriseInstances lists enterprise instances
+*/
+func (a *Client) ListEnterpriseInstances(params *ListEnterpriseInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListEnterpriseInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListEnterpriseInstances",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListEnterpriseInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListEnterpriseInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListEnterpriseInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListEnterprisePools lists enterprise pools
+*/
+func (a *Client) ListEnterprisePools(params *ListEnterprisePoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisePoolsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListEnterprisePoolsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListEnterprisePools",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListEnterprisePoolsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListEnterprisePoolsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListEnterprisePoolsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListEnterpriseScaleSets lists enterprise scale sets
+*/
+func (a *Client) ListEnterpriseScaleSets(params *ListEnterpriseScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterpriseScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListEnterpriseScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListEnterpriseScaleSets",
+ Method: "GET",
+ PathPattern: "/enterprises/{enterpriseID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListEnterpriseScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListEnterpriseScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListEnterpriseScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListEnterprises lists all enterprises
+*/
+func (a *Client) ListEnterprises(params *ListEnterprisesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListEnterprisesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListEnterprisesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListEnterprises",
+ Method: "GET",
+ PathPattern: "/enterprises",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListEnterprisesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListEnterprisesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListEnterprisesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateEnterprise updates enterprise with the given parameters
+*/
+func (a *Client) UpdateEnterprise(params *UpdateEnterpriseParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateEnterpriseOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateEnterpriseParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateEnterprise",
+ Method: "PUT",
+ PathPattern: "/enterprises/{enterpriseID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateEnterpriseReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateEnterpriseOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateEnterpriseDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateEnterprisePool updates enterprise pool with the parameters given
+*/
+func (a *Client) UpdateEnterprisePool(params *UpdateEnterprisePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateEnterprisePoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateEnterprisePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateEnterprisePool",
+ Method: "PUT",
+ PathPattern: "/enterprises/{enterpriseID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateEnterprisePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateEnterprisePoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateEnterprisePoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/enterprises/get_enterprise_parameters.go b/client/enterprises/get_enterprise_parameters.go
new file mode 100644
index 00000000..97161943
--- /dev/null
+++ b/client/enterprises/get_enterprise_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEnterpriseParams creates a new GetEnterpriseParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEnterpriseParams() *GetEnterpriseParams {
+ return &GetEnterpriseParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEnterpriseParamsWithTimeout creates a new GetEnterpriseParams object
+// with the ability to set a timeout on a request.
+func NewGetEnterpriseParamsWithTimeout(timeout time.Duration) *GetEnterpriseParams {
+ return &GetEnterpriseParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEnterpriseParamsWithContext creates a new GetEnterpriseParams object
+// with the ability to set a context for a request.
+func NewGetEnterpriseParamsWithContext(ctx context.Context) *GetEnterpriseParams {
+ return &GetEnterpriseParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEnterpriseParamsWithHTTPClient creates a new GetEnterpriseParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEnterpriseParamsWithHTTPClient(client *http.Client) *GetEnterpriseParams {
+ return &GetEnterpriseParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEnterpriseParams contains all the parameters to send to the API endpoint
+
+ for the get enterprise operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEnterpriseParams struct {
+
+ /* EnterpriseID.
+
+ The ID of the enterprise to fetch.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEnterpriseParams) WithDefaults() *GetEnterpriseParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEnterpriseParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get enterprise params
+func (o *GetEnterpriseParams) WithTimeout(timeout time.Duration) *GetEnterpriseParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get enterprise params
+func (o *GetEnterpriseParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get enterprise params
+func (o *GetEnterpriseParams) WithContext(ctx context.Context) *GetEnterpriseParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get enterprise params
+func (o *GetEnterpriseParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get enterprise params
+func (o *GetEnterpriseParams) WithHTTPClient(client *http.Client) *GetEnterpriseParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get enterprise params
+func (o *GetEnterpriseParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the get enterprise params
+func (o *GetEnterpriseParams) WithEnterpriseID(enterpriseID string) *GetEnterpriseParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the get enterprise params
+func (o *GetEnterpriseParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEnterpriseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/get_enterprise_pool_parameters.go b/client/enterprises/get_enterprise_pool_parameters.go
new file mode 100644
index 00000000..3d8180e6
--- /dev/null
+++ b/client/enterprises/get_enterprise_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEnterprisePoolParams creates a new GetEnterprisePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEnterprisePoolParams() *GetEnterprisePoolParams {
+ return &GetEnterprisePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEnterprisePoolParamsWithTimeout creates a new GetEnterprisePoolParams object
+// with the ability to set a timeout on a request.
+func NewGetEnterprisePoolParamsWithTimeout(timeout time.Duration) *GetEnterprisePoolParams {
+ return &GetEnterprisePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEnterprisePoolParamsWithContext creates a new GetEnterprisePoolParams object
+// with the ability to set a context for a request.
+func NewGetEnterprisePoolParamsWithContext(ctx context.Context) *GetEnterprisePoolParams {
+ return &GetEnterprisePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEnterprisePoolParamsWithHTTPClient creates a new GetEnterprisePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEnterprisePoolParamsWithHTTPClient(client *http.Client) *GetEnterprisePoolParams {
+ return &GetEnterprisePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEnterprisePoolParams contains all the parameters to send to the API endpoint
+
+ for the get enterprise pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEnterprisePoolParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ /* PoolID.
+
+ Pool ID.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEnterprisePoolParams) WithDefaults() *GetEnterprisePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEnterprisePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get enterprise pool params
+func (o *GetEnterprisePoolParams) WithTimeout(timeout time.Duration) *GetEnterprisePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get enterprise pool params
+func (o *GetEnterprisePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get enterprise pool params
+func (o *GetEnterprisePoolParams) WithContext(ctx context.Context) *GetEnterprisePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get enterprise pool params
+func (o *GetEnterprisePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get enterprise pool params
+func (o *GetEnterprisePoolParams) WithHTTPClient(client *http.Client) *GetEnterprisePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get enterprise pool params
+func (o *GetEnterprisePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the get enterprise pool params
+func (o *GetEnterprisePoolParams) WithEnterpriseID(enterpriseID string) *GetEnterprisePoolParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the get enterprise pool params
+func (o *GetEnterprisePoolParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WithPoolID adds the poolID to the get enterprise pool params
+func (o *GetEnterprisePoolParams) WithPoolID(poolID string) *GetEnterprisePoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the get enterprise pool params
+func (o *GetEnterprisePoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEnterprisePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/get_enterprise_pool_responses.go b/client/enterprises/get_enterprise_pool_responses.go
new file mode 100644
index 00000000..df23d774
--- /dev/null
+++ b/client/enterprises/get_enterprise_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetEnterprisePoolReader is a Reader for the GetEnterprisePool structure.
+type GetEnterprisePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEnterprisePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEnterprisePoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetEnterprisePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetEnterprisePoolOK creates a GetEnterprisePoolOK with default headers values
+func NewGetEnterprisePoolOK() *GetEnterprisePoolOK {
+ return &GetEnterprisePoolOK{}
+}
+
+/*
+GetEnterprisePoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type GetEnterprisePoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this get enterprise pool o k response has a 2xx status code
+func (o *GetEnterprisePoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get enterprise pool o k response has a 3xx status code
+func (o *GetEnterprisePoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get enterprise pool o k response has a 4xx status code
+func (o *GetEnterprisePoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get enterprise pool o k response has a 5xx status code
+func (o *GetEnterprisePoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get enterprise pool o k response a status code equal to that given
+func (o *GetEnterprisePoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get enterprise pool o k response
+func (o *GetEnterprisePoolOK) Code() int {
+ return 200
+}
+
+func (o *GetEnterprisePoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *GetEnterprisePoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] getEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *GetEnterprisePoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *GetEnterprisePoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEnterprisePoolDefault creates a GetEnterprisePoolDefault with default headers values
+func NewGetEnterprisePoolDefault(code int) *GetEnterprisePoolDefault {
+ return &GetEnterprisePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetEnterprisePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetEnterprisePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get enterprise pool default response has a 2xx status code
+func (o *GetEnterprisePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get enterprise pool default response has a 3xx status code
+func (o *GetEnterprisePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get enterprise pool default response has a 4xx status code
+func (o *GetEnterprisePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get enterprise pool default response has a 5xx status code
+func (o *GetEnterprisePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get enterprise pool default response a status code equal to that given
+func (o *GetEnterprisePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get enterprise pool default response
+func (o *GetEnterprisePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetEnterprisePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *GetEnterprisePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools/{poolID}][%d] GetEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *GetEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetEnterprisePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/get_enterprise_responses.go b/client/enterprises/get_enterprise_responses.go
new file mode 100644
index 00000000..b617c75f
--- /dev/null
+++ b/client/enterprises/get_enterprise_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetEnterpriseReader is a Reader for the GetEnterprise structure.
+type GetEnterpriseReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEnterpriseReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEnterpriseOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetEnterpriseDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetEnterpriseOK creates a GetEnterpriseOK with default headers values
+func NewGetEnterpriseOK() *GetEnterpriseOK {
+ return &GetEnterpriseOK{}
+}
+
+/*
+GetEnterpriseOK describes a response with status code 200, with default header values.
+
+Enterprise
+*/
+type GetEnterpriseOK struct {
+ Payload garm_params.Enterprise
+}
+
+// IsSuccess returns true when this get enterprise o k response has a 2xx status code
+func (o *GetEnterpriseOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get enterprise o k response has a 3xx status code
+func (o *GetEnterpriseOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get enterprise o k response has a 4xx status code
+func (o *GetEnterpriseOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get enterprise o k response has a 5xx status code
+func (o *GetEnterpriseOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get enterprise o k response a status code equal to that given
+func (o *GetEnterpriseOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get enterprise o k response
+func (o *GetEnterpriseOK) Code() int {
+ return 200
+}
+
+func (o *GetEnterpriseOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %s", 200, payload)
+}
+
+func (o *GetEnterpriseOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] getEnterpriseOK %s", 200, payload)
+}
+
+func (o *GetEnterpriseOK) GetPayload() garm_params.Enterprise {
+ return o.Payload
+}
+
+func (o *GetEnterpriseOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEnterpriseDefault creates a GetEnterpriseDefault with default headers values
+func NewGetEnterpriseDefault(code int) *GetEnterpriseDefault {
+ return &GetEnterpriseDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetEnterpriseDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetEnterpriseDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get enterprise default response has a 2xx status code
+func (o *GetEnterpriseDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get enterprise default response has a 3xx status code
+func (o *GetEnterpriseDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get enterprise default response has a 4xx status code
+func (o *GetEnterpriseDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get enterprise default response has a 5xx status code
+func (o *GetEnterpriseDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get enterprise default response a status code equal to that given
+func (o *GetEnterpriseDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get enterprise default response
+func (o *GetEnterpriseDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetEnterpriseDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *GetEnterpriseDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}][%d] GetEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *GetEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetEnterpriseDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_instances_parameters.go b/client/enterprises/list_enterprise_instances_parameters.go
new file mode 100644
index 00000000..5e23c3e0
--- /dev/null
+++ b/client/enterprises/list_enterprise_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListEnterpriseInstancesParams creates a new ListEnterpriseInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListEnterpriseInstancesParams() *ListEnterpriseInstancesParams {
+ return &ListEnterpriseInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListEnterpriseInstancesParamsWithTimeout creates a new ListEnterpriseInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListEnterpriseInstancesParamsWithTimeout(timeout time.Duration) *ListEnterpriseInstancesParams {
+ return &ListEnterpriseInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListEnterpriseInstancesParamsWithContext creates a new ListEnterpriseInstancesParams object
+// with the ability to set a context for a request.
+func NewListEnterpriseInstancesParamsWithContext(ctx context.Context) *ListEnterpriseInstancesParams {
+ return &ListEnterpriseInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListEnterpriseInstancesParamsWithHTTPClient creates a new ListEnterpriseInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListEnterpriseInstancesParamsWithHTTPClient(client *http.Client) *ListEnterpriseInstancesParams {
+ return &ListEnterpriseInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListEnterpriseInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list enterprise instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListEnterpriseInstancesParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list enterprise instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseInstancesParams) WithDefaults() *ListEnterpriseInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list enterprise instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) WithTimeout(timeout time.Duration) *ListEnterpriseInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) WithContext(ctx context.Context) *ListEnterpriseInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) WithHTTPClient(client *http.Client) *ListEnterpriseInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) WithEnterpriseID(enterpriseID string) *ListEnterpriseInstancesParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the list enterprise instances params
+func (o *ListEnterpriseInstancesParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListEnterpriseInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_instances_responses.go b/client/enterprises/list_enterprise_instances_responses.go
new file mode 100644
index 00000000..642e4ff2
--- /dev/null
+++ b/client/enterprises/list_enterprise_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListEnterpriseInstancesReader is a Reader for the ListEnterpriseInstances structure.
+type ListEnterpriseInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListEnterpriseInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListEnterpriseInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListEnterpriseInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListEnterpriseInstancesOK creates a ListEnterpriseInstancesOK with default headers values
+func NewListEnterpriseInstancesOK() *ListEnterpriseInstancesOK {
+ return &ListEnterpriseInstancesOK{}
+}
+
+/*
+ListEnterpriseInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListEnterpriseInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list enterprise instances o k response has a 2xx status code
+func (o *ListEnterpriseInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list enterprise instances o k response has a 3xx status code
+func (o *ListEnterpriseInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list enterprise instances o k response has a 4xx status code
+func (o *ListEnterpriseInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list enterprise instances o k response has a 5xx status code
+func (o *ListEnterpriseInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list enterprise instances o k response a status code equal to that given
+func (o *ListEnterpriseInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list enterprise instances o k response
+func (o *ListEnterpriseInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListEnterpriseInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] listEnterpriseInstancesOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListEnterpriseInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListEnterpriseInstancesDefault creates a ListEnterpriseInstancesDefault with default headers values
+func NewListEnterpriseInstancesDefault(code int) *ListEnterpriseInstancesDefault {
+ return &ListEnterpriseInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListEnterpriseInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListEnterpriseInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list enterprise instances default response has a 2xx status code
+func (o *ListEnterpriseInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list enterprise instances default response has a 3xx status code
+func (o *ListEnterpriseInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list enterprise instances default response has a 4xx status code
+func (o *ListEnterpriseInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list enterprise instances default response has a 5xx status code
+func (o *ListEnterpriseInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list enterprise instances default response a status code equal to that given
+func (o *ListEnterpriseInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list enterprise instances default response
+func (o *ListEnterpriseInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListEnterpriseInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/instances][%d] ListEnterpriseInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListEnterpriseInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_pools_parameters.go b/client/enterprises/list_enterprise_pools_parameters.go
new file mode 100644
index 00000000..6c58fe0e
--- /dev/null
+++ b/client/enterprises/list_enterprise_pools_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListEnterprisePoolsParams creates a new ListEnterprisePoolsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListEnterprisePoolsParams() *ListEnterprisePoolsParams {
+ return &ListEnterprisePoolsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListEnterprisePoolsParamsWithTimeout creates a new ListEnterprisePoolsParams object
+// with the ability to set a timeout on a request.
+func NewListEnterprisePoolsParamsWithTimeout(timeout time.Duration) *ListEnterprisePoolsParams {
+ return &ListEnterprisePoolsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListEnterprisePoolsParamsWithContext creates a new ListEnterprisePoolsParams object
+// with the ability to set a context for a request.
+func NewListEnterprisePoolsParamsWithContext(ctx context.Context) *ListEnterprisePoolsParams {
+ return &ListEnterprisePoolsParams{
+ Context: ctx,
+ }
+}
+
+// NewListEnterprisePoolsParamsWithHTTPClient creates a new ListEnterprisePoolsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListEnterprisePoolsParamsWithHTTPClient(client *http.Client) *ListEnterprisePoolsParams {
+ return &ListEnterprisePoolsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListEnterprisePoolsParams contains all the parameters to send to the API endpoint
+
+ for the list enterprise pools operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListEnterprisePoolsParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list enterprise pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterprisePoolsParams) WithDefaults() *ListEnterprisePoolsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list enterprise pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterprisePoolsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) WithTimeout(timeout time.Duration) *ListEnterprisePoolsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) WithContext(ctx context.Context) *ListEnterprisePoolsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) WithHTTPClient(client *http.Client) *ListEnterprisePoolsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) WithEnterpriseID(enterpriseID string) *ListEnterprisePoolsParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the list enterprise pools params
+func (o *ListEnterprisePoolsParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListEnterprisePoolsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_pools_responses.go b/client/enterprises/list_enterprise_pools_responses.go
new file mode 100644
index 00000000..29682eb3
--- /dev/null
+++ b/client/enterprises/list_enterprise_pools_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListEnterprisePoolsReader is a Reader for the ListEnterprisePools structure.
+type ListEnterprisePoolsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListEnterprisePoolsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListEnterprisePoolsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListEnterprisePoolsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListEnterprisePoolsOK creates a ListEnterprisePoolsOK with default headers values
+func NewListEnterprisePoolsOK() *ListEnterprisePoolsOK {
+ return &ListEnterprisePoolsOK{}
+}
+
+/*
+ListEnterprisePoolsOK describes a response with status code 200, with default header values.
+
+Pools
+*/
+type ListEnterprisePoolsOK struct {
+ Payload garm_params.Pools
+}
+
+// IsSuccess returns true when this list enterprise pools o k response has a 2xx status code
+func (o *ListEnterprisePoolsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list enterprise pools o k response has a 3xx status code
+func (o *ListEnterprisePoolsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list enterprise pools o k response has a 4xx status code
+func (o *ListEnterprisePoolsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list enterprise pools o k response has a 5xx status code
+func (o *ListEnterprisePoolsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list enterprise pools o k response a status code equal to that given
+func (o *ListEnterprisePoolsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list enterprise pools o k response
+func (o *ListEnterprisePoolsOK) Code() int {
+ return 200
+}
+
+func (o *ListEnterprisePoolsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %s", 200, payload)
+}
+
+func (o *ListEnterprisePoolsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] listEnterprisePoolsOK %s", 200, payload)
+}
+
+func (o *ListEnterprisePoolsOK) GetPayload() garm_params.Pools {
+ return o.Payload
+}
+
+func (o *ListEnterprisePoolsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListEnterprisePoolsDefault creates a ListEnterprisePoolsDefault with default headers values
+func NewListEnterprisePoolsDefault(code int) *ListEnterprisePoolsDefault {
+ return &ListEnterprisePoolsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListEnterprisePoolsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListEnterprisePoolsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list enterprise pools default response has a 2xx status code
+func (o *ListEnterprisePoolsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list enterprise pools default response has a 3xx status code
+func (o *ListEnterprisePoolsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list enterprise pools default response has a 4xx status code
+func (o *ListEnterprisePoolsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list enterprise pools default response has a 5xx status code
+func (o *ListEnterprisePoolsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list enterprise pools default response a status code equal to that given
+func (o *ListEnterprisePoolsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list enterprise pools default response
+func (o *ListEnterprisePoolsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListEnterprisePoolsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterprisePoolsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/pools][%d] ListEnterprisePools default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterprisePoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListEnterprisePoolsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_scale_sets_parameters.go b/client/enterprises/list_enterprise_scale_sets_parameters.go
new file mode 100644
index 00000000..f835717c
--- /dev/null
+++ b/client/enterprises/list_enterprise_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListEnterpriseScaleSetsParams creates a new ListEnterpriseScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListEnterpriseScaleSetsParams() *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithTimeout creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListEnterpriseScaleSetsParamsWithTimeout(timeout time.Duration) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithContext creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListEnterpriseScaleSetsParamsWithContext(ctx context.Context) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListEnterpriseScaleSetsParamsWithHTTPClient creates a new ListEnterpriseScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListEnterpriseScaleSetsParamsWithHTTPClient(client *http.Client) *ListEnterpriseScaleSetsParams {
+ return &ListEnterpriseScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListEnterpriseScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list enterprise scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListEnterpriseScaleSetsParams struct {
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list enterprise scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseScaleSetsParams) WithDefaults() *ListEnterpriseScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list enterprise scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterpriseScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithTimeout(timeout time.Duration) *ListEnterpriseScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithContext(ctx context.Context) *ListEnterpriseScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithHTTPClient(client *http.Client) *ListEnterpriseScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEnterpriseID adds the enterpriseID to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) WithEnterpriseID(enterpriseID string) *ListEnterpriseScaleSetsParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the list enterprise scale sets params
+func (o *ListEnterpriseScaleSetsParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListEnterpriseScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/list_enterprise_scale_sets_responses.go b/client/enterprises/list_enterprise_scale_sets_responses.go
new file mode 100644
index 00000000..9c2564c2
--- /dev/null
+++ b/client/enterprises/list_enterprise_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListEnterpriseScaleSetsReader is a Reader for the ListEnterpriseScaleSets structure.
+type ListEnterpriseScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListEnterpriseScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListEnterpriseScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListEnterpriseScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListEnterpriseScaleSetsOK creates a ListEnterpriseScaleSetsOK with default headers values
+func NewListEnterpriseScaleSetsOK() *ListEnterpriseScaleSetsOK {
+ return &ListEnterpriseScaleSetsOK{}
+}
+
+/*
+ListEnterpriseScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListEnterpriseScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list enterprise scale sets o k response has a 2xx status code
+func (o *ListEnterpriseScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list enterprise scale sets o k response has a 3xx status code
+func (o *ListEnterpriseScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list enterprise scale sets o k response has a 4xx status code
+func (o *ListEnterpriseScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list enterprise scale sets o k response has a 5xx status code
+func (o *ListEnterpriseScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list enterprise scale sets o k response a status code equal to that given
+func (o *ListEnterpriseScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list enterprise scale sets o k response
+func (o *ListEnterpriseScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListEnterpriseScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] listEnterpriseScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] listEnterpriseScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListEnterpriseScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListEnterpriseScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListEnterpriseScaleSetsDefault creates a ListEnterpriseScaleSetsDefault with default headers values
+func NewListEnterpriseScaleSetsDefault(code int) *ListEnterpriseScaleSetsDefault {
+ return &ListEnterpriseScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListEnterpriseScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListEnterpriseScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list enterprise scale sets default response has a 2xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list enterprise scale sets default response has a 3xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list enterprise scale sets default response has a 4xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list enterprise scale sets default response has a 5xx status code
+func (o *ListEnterpriseScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list enterprise scale sets default response a status code equal to that given
+func (o *ListEnterpriseScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list enterprise scale sets default response
+func (o *ListEnterpriseScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListEnterpriseScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] ListEnterpriseScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises/{enterpriseID}/scalesets][%d] ListEnterpriseScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterpriseScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListEnterpriseScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/list_enterprises_parameters.go b/client/enterprises/list_enterprises_parameters.go
new file mode 100644
index 00000000..44ba108b
--- /dev/null
+++ b/client/enterprises/list_enterprises_parameters.go
@@ -0,0 +1,197 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListEnterprisesParams creates a new ListEnterprisesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListEnterprisesParams() *ListEnterprisesParams {
+ return &ListEnterprisesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListEnterprisesParamsWithTimeout creates a new ListEnterprisesParams object
+// with the ability to set a timeout on a request.
+func NewListEnterprisesParamsWithTimeout(timeout time.Duration) *ListEnterprisesParams {
+ return &ListEnterprisesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListEnterprisesParamsWithContext creates a new ListEnterprisesParams object
+// with the ability to set a context for a request.
+func NewListEnterprisesParamsWithContext(ctx context.Context) *ListEnterprisesParams {
+ return &ListEnterprisesParams{
+ Context: ctx,
+ }
+}
+
+// NewListEnterprisesParamsWithHTTPClient creates a new ListEnterprisesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListEnterprisesParamsWithHTTPClient(client *http.Client) *ListEnterprisesParams {
+ return &ListEnterprisesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListEnterprisesParams contains all the parameters to send to the API endpoint
+
+ for the list enterprises operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListEnterprisesParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact enterprise name to filter by
+ */
+ Name *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list enterprises params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterprisesParams) WithDefaults() *ListEnterprisesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list enterprises params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListEnterprisesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list enterprises params
+func (o *ListEnterprisesParams) WithTimeout(timeout time.Duration) *ListEnterprisesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list enterprises params
+func (o *ListEnterprisesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list enterprises params
+func (o *ListEnterprisesParams) WithContext(ctx context.Context) *ListEnterprisesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list enterprises params
+func (o *ListEnterprisesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list enterprises params
+func (o *ListEnterprisesParams) WithHTTPClient(client *http.Client) *ListEnterprisesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list enterprises params
+func (o *ListEnterprisesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the list enterprises params
+func (o *ListEnterprisesParams) WithEndpoint(endpoint *string) *ListEnterprisesParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list enterprises params
+func (o *ListEnterprisesParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list enterprises params
+func (o *ListEnterprisesParams) WithName(name *string) *ListEnterprisesParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list enterprises params
+func (o *ListEnterprisesParams) SetName(name *string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListEnterprisesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/list_enterprises_responses.go b/client/enterprises/list_enterprises_responses.go
new file mode 100644
index 00000000..6a2ec69d
--- /dev/null
+++ b/client/enterprises/list_enterprises_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListEnterprisesReader is a Reader for the ListEnterprises structure.
+type ListEnterprisesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListEnterprisesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListEnterprisesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListEnterprisesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListEnterprisesOK creates a ListEnterprisesOK with default headers values
+func NewListEnterprisesOK() *ListEnterprisesOK {
+ return &ListEnterprisesOK{}
+}
+
+/*
+ListEnterprisesOK describes a response with status code 200, with default header values.
+
+Enterprises
+*/
+type ListEnterprisesOK struct {
+ Payload garm_params.Enterprises
+}
+
+// IsSuccess returns true when this list enterprises o k response has a 2xx status code
+func (o *ListEnterprisesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list enterprises o k response has a 3xx status code
+func (o *ListEnterprisesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list enterprises o k response has a 4xx status code
+func (o *ListEnterprisesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list enterprises o k response has a 5xx status code
+func (o *ListEnterprisesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list enterprises o k response a status code equal to that given
+func (o *ListEnterprisesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list enterprises o k response
+func (o *ListEnterprisesOK) Code() int {
+ return 200
+}
+
+func (o *ListEnterprisesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %s", 200, payload)
+}
+
+func (o *ListEnterprisesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] listEnterprisesOK %s", 200, payload)
+}
+
+func (o *ListEnterprisesOK) GetPayload() garm_params.Enterprises {
+ return o.Payload
+}
+
+func (o *ListEnterprisesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListEnterprisesDefault creates a ListEnterprisesDefault with default headers values
+func NewListEnterprisesDefault(code int) *ListEnterprisesDefault {
+ return &ListEnterprisesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListEnterprisesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListEnterprisesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list enterprises default response has a 2xx status code
+func (o *ListEnterprisesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list enterprises default response has a 3xx status code
+func (o *ListEnterprisesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list enterprises default response has a 4xx status code
+func (o *ListEnterprisesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list enterprises default response has a 5xx status code
+func (o *ListEnterprisesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list enterprises default response a status code equal to that given
+func (o *ListEnterprisesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list enterprises default response
+func (o *ListEnterprisesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListEnterprisesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterprisesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /enterprises][%d] ListEnterprises default %s", o._statusCode, payload)
+}
+
+func (o *ListEnterprisesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListEnterprisesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/update_enterprise_parameters.go b/client/enterprises/update_enterprise_parameters.go
new file mode 100644
index 00000000..2ad38eb6
--- /dev/null
+++ b/client/enterprises/update_enterprise_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateEnterpriseParams creates a new UpdateEnterpriseParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateEnterpriseParams() *UpdateEnterpriseParams {
+ return &UpdateEnterpriseParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateEnterpriseParamsWithTimeout creates a new UpdateEnterpriseParams object
+// with the ability to set a timeout on a request.
+func NewUpdateEnterpriseParamsWithTimeout(timeout time.Duration) *UpdateEnterpriseParams {
+ return &UpdateEnterpriseParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateEnterpriseParamsWithContext creates a new UpdateEnterpriseParams object
+// with the ability to set a context for a request.
+func NewUpdateEnterpriseParamsWithContext(ctx context.Context) *UpdateEnterpriseParams {
+ return &UpdateEnterpriseParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateEnterpriseParamsWithHTTPClient creates a new UpdateEnterpriseParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateEnterpriseParamsWithHTTPClient(client *http.Client) *UpdateEnterpriseParams {
+ return &UpdateEnterpriseParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateEnterpriseParams contains all the parameters to send to the API endpoint
+
+ for the update enterprise operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateEnterpriseParams struct {
+
+ /* Body.
+
+ Parameters used when updating the enterprise.
+ */
+ Body garm_params.UpdateEntityParams
+
+ /* EnterpriseID.
+
+ The ID of the enterprise to update.
+ */
+ EnterpriseID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateEnterpriseParams) WithDefaults() *UpdateEnterpriseParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update enterprise params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateEnterpriseParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update enterprise params
+func (o *UpdateEnterpriseParams) WithTimeout(timeout time.Duration) *UpdateEnterpriseParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update enterprise params
+func (o *UpdateEnterpriseParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update enterprise params
+func (o *UpdateEnterpriseParams) WithContext(ctx context.Context) *UpdateEnterpriseParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update enterprise params
+func (o *UpdateEnterpriseParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update enterprise params
+func (o *UpdateEnterpriseParams) WithHTTPClient(client *http.Client) *UpdateEnterpriseParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update enterprise params
+func (o *UpdateEnterpriseParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update enterprise params
+func (o *UpdateEnterpriseParams) WithBody(body garm_params.UpdateEntityParams) *UpdateEnterpriseParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update enterprise params
+func (o *UpdateEnterpriseParams) SetBody(body garm_params.UpdateEntityParams) {
+ o.Body = body
+}
+
+// WithEnterpriseID adds the enterpriseID to the update enterprise params
+func (o *UpdateEnterpriseParams) WithEnterpriseID(enterpriseID string) *UpdateEnterpriseParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the update enterprise params
+func (o *UpdateEnterpriseParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateEnterpriseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/update_enterprise_pool_parameters.go b/client/enterprises/update_enterprise_pool_parameters.go
new file mode 100644
index 00000000..7940eb54
--- /dev/null
+++ b/client/enterprises/update_enterprise_pool_parameters.go
@@ -0,0 +1,195 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateEnterprisePoolParams creates a new UpdateEnterprisePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateEnterprisePoolParams() *UpdateEnterprisePoolParams {
+ return &UpdateEnterprisePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateEnterprisePoolParamsWithTimeout creates a new UpdateEnterprisePoolParams object
+// with the ability to set a timeout on a request.
+func NewUpdateEnterprisePoolParamsWithTimeout(timeout time.Duration) *UpdateEnterprisePoolParams {
+ return &UpdateEnterprisePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateEnterprisePoolParamsWithContext creates a new UpdateEnterprisePoolParams object
+// with the ability to set a context for a request.
+func NewUpdateEnterprisePoolParamsWithContext(ctx context.Context) *UpdateEnterprisePoolParams {
+ return &UpdateEnterprisePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateEnterprisePoolParamsWithHTTPClient creates a new UpdateEnterprisePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateEnterprisePoolParamsWithHTTPClient(client *http.Client) *UpdateEnterprisePoolParams {
+ return &UpdateEnterprisePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateEnterprisePoolParams contains all the parameters to send to the API endpoint
+
+ for the update enterprise pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateEnterprisePoolParams struct {
+
+ /* Body.
+
+ Parameters used when updating the enterprise pool.
+ */
+ Body garm_params.UpdatePoolParams
+
+ /* EnterpriseID.
+
+ Enterprise ID.
+ */
+ EnterpriseID string
+
+ /* PoolID.
+
+ ID of the enterprise pool to update.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateEnterprisePoolParams) WithDefaults() *UpdateEnterprisePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update enterprise pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateEnterprisePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithTimeout(timeout time.Duration) *UpdateEnterprisePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithContext(ctx context.Context) *UpdateEnterprisePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithHTTPClient(client *http.Client) *UpdateEnterprisePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithBody(body garm_params.UpdatePoolParams) *UpdateEnterprisePoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetBody(body garm_params.UpdatePoolParams) {
+ o.Body = body
+}
+
+// WithEnterpriseID adds the enterpriseID to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithEnterpriseID(enterpriseID string) *UpdateEnterprisePoolParams {
+ o.SetEnterpriseID(enterpriseID)
+ return o
+}
+
+// SetEnterpriseID adds the enterpriseId to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetEnterpriseID(enterpriseID string) {
+ o.EnterpriseID = enterpriseID
+}
+
+// WithPoolID adds the poolID to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) WithPoolID(poolID string) *UpdateEnterprisePoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the update enterprise pool params
+func (o *UpdateEnterprisePoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateEnterprisePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param enterpriseID
+ if err := r.SetPathParam("enterpriseID", o.EnterpriseID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/enterprises/update_enterprise_pool_responses.go b/client/enterprises/update_enterprise_pool_responses.go
new file mode 100644
index 00000000..25a19974
--- /dev/null
+++ b/client/enterprises/update_enterprise_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateEnterprisePoolReader is a Reader for the UpdateEnterprisePool structure.
+type UpdateEnterprisePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateEnterprisePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateEnterprisePoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateEnterprisePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateEnterprisePoolOK creates a UpdateEnterprisePoolOK with default headers values
+func NewUpdateEnterprisePoolOK() *UpdateEnterprisePoolOK {
+ return &UpdateEnterprisePoolOK{}
+}
+
+/*
+UpdateEnterprisePoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type UpdateEnterprisePoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this update enterprise pool o k response has a 2xx status code
+func (o *UpdateEnterprisePoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update enterprise pool o k response has a 3xx status code
+func (o *UpdateEnterprisePoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update enterprise pool o k response has a 4xx status code
+func (o *UpdateEnterprisePoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update enterprise pool o k response has a 5xx status code
+func (o *UpdateEnterprisePoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update enterprise pool o k response a status code equal to that given
+func (o *UpdateEnterprisePoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update enterprise pool o k response
+func (o *UpdateEnterprisePoolOK) Code() int {
+ return 200
+}
+
+func (o *UpdateEnterprisePoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *UpdateEnterprisePoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] updateEnterprisePoolOK %s", 200, payload)
+}
+
+func (o *UpdateEnterprisePoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *UpdateEnterprisePoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateEnterprisePoolDefault creates a UpdateEnterprisePoolDefault with default headers values
+func NewUpdateEnterprisePoolDefault(code int) *UpdateEnterprisePoolDefault {
+ return &UpdateEnterprisePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateEnterprisePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateEnterprisePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update enterprise pool default response has a 2xx status code
+func (o *UpdateEnterprisePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update enterprise pool default response has a 3xx status code
+func (o *UpdateEnterprisePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update enterprise pool default response has a 4xx status code
+func (o *UpdateEnterprisePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update enterprise pool default response has a 5xx status code
+func (o *UpdateEnterprisePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update enterprise pool default response a status code equal to that given
+func (o *UpdateEnterprisePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update enterprise pool default response
+func (o *UpdateEnterprisePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateEnterprisePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateEnterprisePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}/pools/{poolID}][%d] UpdateEnterprisePool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateEnterprisePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateEnterprisePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/enterprises/update_enterprise_responses.go b/client/enterprises/update_enterprise_responses.go
new file mode 100644
index 00000000..f6d34781
--- /dev/null
+++ b/client/enterprises/update_enterprise_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package enterprises
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateEnterpriseReader is a Reader for the UpdateEnterprise structure.
+type UpdateEnterpriseReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateEnterpriseReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateEnterpriseOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateEnterpriseDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateEnterpriseOK creates a UpdateEnterpriseOK with default headers values
+func NewUpdateEnterpriseOK() *UpdateEnterpriseOK {
+ return &UpdateEnterpriseOK{}
+}
+
+/*
+UpdateEnterpriseOK describes a response with status code 200, with default header values.
+
+Enterprise
+*/
+type UpdateEnterpriseOK struct {
+ Payload garm_params.Enterprise
+}
+
+// IsSuccess returns true when this update enterprise o k response has a 2xx status code
+func (o *UpdateEnterpriseOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update enterprise o k response has a 3xx status code
+func (o *UpdateEnterpriseOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update enterprise o k response has a 4xx status code
+func (o *UpdateEnterpriseOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update enterprise o k response has a 5xx status code
+func (o *UpdateEnterpriseOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update enterprise o k response a status code equal to that given
+func (o *UpdateEnterpriseOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update enterprise o k response
+func (o *UpdateEnterpriseOK) Code() int {
+ return 200
+}
+
+func (o *UpdateEnterpriseOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %s", 200, payload)
+}
+
+func (o *UpdateEnterpriseOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] updateEnterpriseOK %s", 200, payload)
+}
+
+func (o *UpdateEnterpriseOK) GetPayload() garm_params.Enterprise {
+ return o.Payload
+}
+
+func (o *UpdateEnterpriseOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateEnterpriseDefault creates a UpdateEnterpriseDefault with default headers values
+func NewUpdateEnterpriseDefault(code int) *UpdateEnterpriseDefault {
+ return &UpdateEnterpriseDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateEnterpriseDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateEnterpriseDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update enterprise default response has a 2xx status code
+func (o *UpdateEnterpriseDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update enterprise default response has a 3xx status code
+func (o *UpdateEnterpriseDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update enterprise default response has a 4xx status code
+func (o *UpdateEnterpriseDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update enterprise default response has a 5xx status code
+func (o *UpdateEnterpriseDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update enterprise default response a status code equal to that given
+func (o *UpdateEnterpriseDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update enterprise default response
+func (o *UpdateEnterpriseDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateEnterpriseDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *UpdateEnterpriseDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /enterprises/{enterpriseID}][%d] UpdateEnterprise default %s", o._statusCode, payload)
+}
+
+func (o *UpdateEnterpriseDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateEnterpriseDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/first_run/first_run_client.go b/client/first_run/first_run_client.go
new file mode 100644
index 00000000..09792ad5
--- /dev/null
+++ b/client/first_run/first_run_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package first_run
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new first run API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new first run API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new first run API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for first run API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ FirstRun(params *FirstRunParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*FirstRunOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+FirstRun initializes the first run of the controller
+*/
+func (a *Client) FirstRun(params *FirstRunParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*FirstRunOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewFirstRunParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "FirstRun",
+ Method: "POST",
+ PathPattern: "/first-run",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &FirstRunReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*FirstRunOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for FirstRun: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/first_run/first_run_parameters.go b/client/first_run/first_run_parameters.go
new file mode 100644
index 00000000..5d3b91fd
--- /dev/null
+++ b/client/first_run/first_run_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package first_run
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewFirstRunParams creates a new FirstRunParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewFirstRunParams() *FirstRunParams {
+ return &FirstRunParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewFirstRunParamsWithTimeout creates a new FirstRunParams object
+// with the ability to set a timeout on a request.
+func NewFirstRunParamsWithTimeout(timeout time.Duration) *FirstRunParams {
+ return &FirstRunParams{
+ timeout: timeout,
+ }
+}
+
+// NewFirstRunParamsWithContext creates a new FirstRunParams object
+// with the ability to set a context for a request.
+func NewFirstRunParamsWithContext(ctx context.Context) *FirstRunParams {
+ return &FirstRunParams{
+ Context: ctx,
+ }
+}
+
+// NewFirstRunParamsWithHTTPClient creates a new FirstRunParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewFirstRunParamsWithHTTPClient(client *http.Client) *FirstRunParams {
+ return &FirstRunParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+FirstRunParams contains all the parameters to send to the API endpoint
+
+ for the first run operation.
+
+ Typically these are written to a http.Request.
+*/
+type FirstRunParams struct {
+
+ /* Body.
+
+ Create a new user.
+ */
+ Body garm_params.NewUserParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the first run params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *FirstRunParams) WithDefaults() *FirstRunParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the first run params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *FirstRunParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the first run params
+func (o *FirstRunParams) WithTimeout(timeout time.Duration) *FirstRunParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the first run params
+func (o *FirstRunParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the first run params
+func (o *FirstRunParams) WithContext(ctx context.Context) *FirstRunParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the first run params
+func (o *FirstRunParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the first run params
+func (o *FirstRunParams) WithHTTPClient(client *http.Client) *FirstRunParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the first run params
+func (o *FirstRunParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the first run params
+func (o *FirstRunParams) WithBody(body garm_params.NewUserParams) *FirstRunParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the first run params
+func (o *FirstRunParams) SetBody(body garm_params.NewUserParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FirstRunParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/first_run/first_run_responses.go b/client/first_run/first_run_responses.go
new file mode 100644
index 00000000..d0d7c2e7
--- /dev/null
+++ b/client/first_run/first_run_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package first_run
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// FirstRunReader is a Reader for the FirstRun structure.
+type FirstRunReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FirstRunReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewFirstRunOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewFirstRunBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /first-run] FirstRun", response, response.Code())
+ }
+}
+
+// NewFirstRunOK creates a FirstRunOK with default headers values
+func NewFirstRunOK() *FirstRunOK {
+ return &FirstRunOK{}
+}
+
+/*
+FirstRunOK describes a response with status code 200, with default header values.
+
+User
+*/
+type FirstRunOK struct {
+ Payload garm_params.User
+}
+
+// IsSuccess returns true when this first run o k response has a 2xx status code
+func (o *FirstRunOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this first run o k response has a 3xx status code
+func (o *FirstRunOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this first run o k response has a 4xx status code
+func (o *FirstRunOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this first run o k response has a 5xx status code
+func (o *FirstRunOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this first run o k response a status code equal to that given
+func (o *FirstRunOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the first run o k response
+func (o *FirstRunOK) Code() int {
+ return 200
+}
+
+func (o *FirstRunOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunOK %s", 200, payload)
+}
+
+func (o *FirstRunOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunOK %s", 200, payload)
+}
+
+func (o *FirstRunOK) GetPayload() garm_params.User {
+ return o.Payload
+}
+
+func (o *FirstRunOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewFirstRunBadRequest creates a FirstRunBadRequest with default headers values
+func NewFirstRunBadRequest() *FirstRunBadRequest {
+ return &FirstRunBadRequest{}
+}
+
+/*
+FirstRunBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type FirstRunBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this first run bad request response has a 2xx status code
+func (o *FirstRunBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this first run bad request response has a 3xx status code
+func (o *FirstRunBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this first run bad request response has a 4xx status code
+func (o *FirstRunBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this first run bad request response has a 5xx status code
+func (o *FirstRunBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this first run bad request response a status code equal to that given
+func (o *FirstRunBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the first run bad request response
+func (o *FirstRunBadRequest) Code() int {
+ return 400
+}
+
+func (o *FirstRunBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %s", 400, payload)
+}
+
+func (o *FirstRunBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /first-run][%d] firstRunBadRequest %s", 400, payload)
+}
+
+func (o *FirstRunBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *FirstRunBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/garm_api_client.go b/client/garm_api_client.go
new file mode 100644
index 00000000..f5bc51b2
--- /dev/null
+++ b/client/garm_api_client.go
@@ -0,0 +1,182 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package client
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cloudbase/garm/client/controller"
+ "github.com/cloudbase/garm/client/controller_info"
+ "github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/client/endpoints"
+ "github.com/cloudbase/garm/client/enterprises"
+ "github.com/cloudbase/garm/client/first_run"
+ "github.com/cloudbase/garm/client/instances"
+ "github.com/cloudbase/garm/client/jobs"
+ "github.com/cloudbase/garm/client/login"
+ "github.com/cloudbase/garm/client/metrics_token"
+ "github.com/cloudbase/garm/client/organizations"
+ "github.com/cloudbase/garm/client/pools"
+ "github.com/cloudbase/garm/client/providers"
+ "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/client/scalesets"
+)
+
+// Default garm API HTTP client.
+var Default = NewHTTPClient(nil)
+
+const (
+ // DefaultHost is the default Host
+ // found in Meta (info) section of spec file
+ DefaultHost string = "localhost"
+ // DefaultBasePath is the default BasePath
+ // found in Meta (info) section of spec file
+ DefaultBasePath string = "/api/v1"
+)
+
+// DefaultSchemes are the default schemes found in Meta (info) section of spec file
+var DefaultSchemes = []string{"http"}
+
+// NewHTTPClient creates a new garm API HTTP client.
+func NewHTTPClient(formats strfmt.Registry) *GarmAPI {
+ return NewHTTPClientWithConfig(formats, nil)
+}
+
+// NewHTTPClientWithConfig creates a new garm API HTTP client,
+// using a customizable transport config.
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *GarmAPI {
+ // ensure nullable parameters have default
+ if cfg == nil {
+ cfg = DefaultTransportConfig()
+ }
+
+ // create transport and client
+ transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)
+ return New(transport, formats)
+}
+
+// New creates a new garm API client
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *GarmAPI {
+ // ensure nullable parameters have default
+ if formats == nil {
+ formats = strfmt.Default
+ }
+
+ cli := new(GarmAPI)
+ cli.Transport = transport
+ cli.Controller = controller.New(transport, formats)
+ cli.ControllerInfo = controller_info.New(transport, formats)
+ cli.Credentials = credentials.New(transport, formats)
+ cli.Endpoints = endpoints.New(transport, formats)
+ cli.Enterprises = enterprises.New(transport, formats)
+ cli.FirstRun = first_run.New(transport, formats)
+ cli.Instances = instances.New(transport, formats)
+ cli.Jobs = jobs.New(transport, formats)
+ cli.Login = login.New(transport, formats)
+ cli.MetricsToken = metrics_token.New(transport, formats)
+ cli.Organizations = organizations.New(transport, formats)
+ cli.Pools = pools.New(transport, formats)
+ cli.Providers = providers.New(transport, formats)
+ cli.Repositories = repositories.New(transport, formats)
+ cli.Scalesets = scalesets.New(transport, formats)
+ return cli
+}
+
+// DefaultTransportConfig creates a TransportConfig with the
+// default settings taken from the meta section of the spec file.
+func DefaultTransportConfig() *TransportConfig {
+ return &TransportConfig{
+ Host: DefaultHost,
+ BasePath: DefaultBasePath,
+ Schemes: DefaultSchemes,
+ }
+}
+
+// TransportConfig contains the transport related info,
+// found in the meta section of the spec file.
+type TransportConfig struct {
+ Host string
+ BasePath string
+ Schemes []string
+}
+
+// WithHost overrides the default host,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
+ cfg.Host = host
+ return cfg
+}
+
+// WithBasePath overrides the default basePath,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
+ cfg.BasePath = basePath
+ return cfg
+}
+
+// WithSchemes overrides the default schemes,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
+ cfg.Schemes = schemes
+ return cfg
+}
+
+// GarmAPI is a client for garm API
+type GarmAPI struct {
+ Controller controller.ClientService
+
+ ControllerInfo controller_info.ClientService
+
+ Credentials credentials.ClientService
+
+ Endpoints endpoints.ClientService
+
+ Enterprises enterprises.ClientService
+
+ FirstRun first_run.ClientService
+
+ Instances instances.ClientService
+
+ Jobs jobs.ClientService
+
+ Login login.ClientService
+
+ MetricsToken metrics_token.ClientService
+
+ Organizations organizations.ClientService
+
+ Pools pools.ClientService
+
+ Providers providers.ClientService
+
+ Repositories repositories.ClientService
+
+ Scalesets scalesets.ClientService
+
+ Transport runtime.ClientTransport
+}
+
+// SetTransport changes the transport on the client and all its subresources
+func (c *GarmAPI) SetTransport(transport runtime.ClientTransport) {
+ c.Transport = transport
+ c.Controller.SetTransport(transport)
+ c.ControllerInfo.SetTransport(transport)
+ c.Credentials.SetTransport(transport)
+ c.Endpoints.SetTransport(transport)
+ c.Enterprises.SetTransport(transport)
+ c.FirstRun.SetTransport(transport)
+ c.Instances.SetTransport(transport)
+ c.Jobs.SetTransport(transport)
+ c.Login.SetTransport(transport)
+ c.MetricsToken.SetTransport(transport)
+ c.Organizations.SetTransport(transport)
+ c.Pools.SetTransport(transport)
+ c.Providers.SetTransport(transport)
+ c.Repositories.SetTransport(transport)
+ c.Scalesets.SetTransport(transport)
+}
diff --git a/client/instances/delete_instance_parameters.go b/client/instances/delete_instance_parameters.go
new file mode 100644
index 00000000..4a88ea5b
--- /dev/null
+++ b/client/instances/delete_instance_parameters.go
@@ -0,0 +1,220 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteInstanceParams creates a new DeleteInstanceParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteInstanceParams() *DeleteInstanceParams {
+ return &DeleteInstanceParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteInstanceParamsWithTimeout creates a new DeleteInstanceParams object
+// with the ability to set a timeout on a request.
+func NewDeleteInstanceParamsWithTimeout(timeout time.Duration) *DeleteInstanceParams {
+ return &DeleteInstanceParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteInstanceParamsWithContext creates a new DeleteInstanceParams object
+// with the ability to set a context for a request.
+func NewDeleteInstanceParamsWithContext(ctx context.Context) *DeleteInstanceParams {
+ return &DeleteInstanceParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteInstanceParamsWithHTTPClient creates a new DeleteInstanceParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteInstanceParamsWithHTTPClient(client *http.Client) *DeleteInstanceParams {
+ return &DeleteInstanceParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteInstanceParams contains all the parameters to send to the API endpoint
+
+ for the delete instance operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteInstanceParams struct {
+
+ /* BypassGHUnauthorized.
+
+ If true GARM will ignore unauthorized errors returned by GitHub when removing a runner. This is useful if you want to clean up runners and your credentials have expired.
+ */
+ BypassGHUnauthorized *bool
+
+ /* ForceRemove.
+
+ If true GARM will ignore any provider error when removing the runner and will continue to remove the runner from github and the GARM database.
+ */
+ ForceRemove *bool
+
+ /* InstanceName.
+
+ Runner instance name.
+ */
+ InstanceName string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete instance params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteInstanceParams) WithDefaults() *DeleteInstanceParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete instance params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteInstanceParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete instance params
+func (o *DeleteInstanceParams) WithTimeout(timeout time.Duration) *DeleteInstanceParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete instance params
+func (o *DeleteInstanceParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete instance params
+func (o *DeleteInstanceParams) WithContext(ctx context.Context) *DeleteInstanceParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete instance params
+func (o *DeleteInstanceParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete instance params
+func (o *DeleteInstanceParams) WithHTTPClient(client *http.Client) *DeleteInstanceParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete instance params
+func (o *DeleteInstanceParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBypassGHUnauthorized adds the bypassGHUnauthorized to the delete instance params
+func (o *DeleteInstanceParams) WithBypassGHUnauthorized(bypassGHUnauthorized *bool) *DeleteInstanceParams {
+ o.SetBypassGHUnauthorized(bypassGHUnauthorized)
+ return o
+}
+
+// SetBypassGHUnauthorized adds the bypassGHUnauthorized to the delete instance params
+func (o *DeleteInstanceParams) SetBypassGHUnauthorized(bypassGHUnauthorized *bool) {
+ o.BypassGHUnauthorized = bypassGHUnauthorized
+}
+
+// WithForceRemove adds the forceRemove to the delete instance params
+func (o *DeleteInstanceParams) WithForceRemove(forceRemove *bool) *DeleteInstanceParams {
+ o.SetForceRemove(forceRemove)
+ return o
+}
+
+// SetForceRemove adds the forceRemove to the delete instance params
+func (o *DeleteInstanceParams) SetForceRemove(forceRemove *bool) {
+ o.ForceRemove = forceRemove
+}
+
+// WithInstanceName adds the instanceName to the delete instance params
+func (o *DeleteInstanceParams) WithInstanceName(instanceName string) *DeleteInstanceParams {
+ o.SetInstanceName(instanceName)
+ return o
+}
+
+// SetInstanceName adds the instanceName to the delete instance params
+func (o *DeleteInstanceParams) SetInstanceName(instanceName string) {
+ o.InstanceName = instanceName
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteInstanceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.BypassGHUnauthorized != nil {
+
+ // query param bypassGHUnauthorized
+ var qrBypassGHUnauthorized bool
+
+ if o.BypassGHUnauthorized != nil {
+ qrBypassGHUnauthorized = *o.BypassGHUnauthorized
+ }
+ qBypassGHUnauthorized := swag.FormatBool(qrBypassGHUnauthorized)
+ if qBypassGHUnauthorized != "" {
+
+ if err := r.SetQueryParam("bypassGHUnauthorized", qBypassGHUnauthorized); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.ForceRemove != nil {
+
+ // query param forceRemove
+ var qrForceRemove bool
+
+ if o.ForceRemove != nil {
+ qrForceRemove = *o.ForceRemove
+ }
+ qForceRemove := swag.FormatBool(qrForceRemove)
+ if qForceRemove != "" {
+
+ if err := r.SetQueryParam("forceRemove", qForceRemove); err != nil {
+ return err
+ }
+ }
+ }
+
+ // path param instanceName
+ if err := r.SetPathParam("instanceName", o.InstanceName); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/delete_instance_responses.go b/client/instances/delete_instance_responses.go
new file mode 100644
index 00000000..1748f66e
--- /dev/null
+++ b/client/instances/delete_instance_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteInstanceReader is a Reader for the DeleteInstance structure.
+type DeleteInstanceReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteInstanceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteInstanceDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteInstanceDefault creates a DeleteInstanceDefault with default headers values
+func NewDeleteInstanceDefault(code int) *DeleteInstanceDefault {
+ return &DeleteInstanceDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteInstanceDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteInstanceDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete instance default response has a 2xx status code
+func (o *DeleteInstanceDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete instance default response has a 3xx status code
+func (o *DeleteInstanceDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete instance default response has a 4xx status code
+func (o *DeleteInstanceDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete instance default response has a 5xx status code
+func (o *DeleteInstanceDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete instance default response has a status code equal to that given
+func (o *DeleteInstanceDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete instance default response
+func (o *DeleteInstanceDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteInstanceDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %s", o._statusCode, payload)
+}
+
+func (o *DeleteInstanceDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /instances/{instanceName}][%d] DeleteInstance default %s", o._statusCode, payload)
+}
+
+func (o *DeleteInstanceDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteInstanceDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/instances/get_instance_parameters.go b/client/instances/get_instance_parameters.go
new file mode 100644
index 00000000..6d9e770b
--- /dev/null
+++ b/client/instances/get_instance_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetInstanceParams creates a new GetInstanceParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetInstanceParams() *GetInstanceParams {
+ return &GetInstanceParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetInstanceParamsWithTimeout creates a new GetInstanceParams object
+// with the ability to set a timeout on a request.
+func NewGetInstanceParamsWithTimeout(timeout time.Duration) *GetInstanceParams {
+ return &GetInstanceParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetInstanceParamsWithContext creates a new GetInstanceParams object
+// with the ability to set a context for a request.
+func NewGetInstanceParamsWithContext(ctx context.Context) *GetInstanceParams {
+ return &GetInstanceParams{
+ Context: ctx,
+ }
+}
+
+// NewGetInstanceParamsWithHTTPClient creates a new GetInstanceParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetInstanceParamsWithHTTPClient(client *http.Client) *GetInstanceParams {
+ return &GetInstanceParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetInstanceParams contains all the parameters to send to the API endpoint
+
+ for the get instance operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetInstanceParams struct {
+
+ /* InstanceName.
+
+ Runner instance name.
+ */
+ InstanceName string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get instance params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetInstanceParams) WithDefaults() *GetInstanceParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get instance params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetInstanceParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get instance params
+func (o *GetInstanceParams) WithTimeout(timeout time.Duration) *GetInstanceParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get instance params
+func (o *GetInstanceParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get instance params
+func (o *GetInstanceParams) WithContext(ctx context.Context) *GetInstanceParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get instance params
+func (o *GetInstanceParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get instance params
+func (o *GetInstanceParams) WithHTTPClient(client *http.Client) *GetInstanceParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get instance params
+func (o *GetInstanceParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithInstanceName adds the instanceName to the get instance params
+func (o *GetInstanceParams) WithInstanceName(instanceName string) *GetInstanceParams {
+ o.SetInstanceName(instanceName)
+ return o
+}
+
+// SetInstanceName adds the instanceName to the get instance params
+func (o *GetInstanceParams) SetInstanceName(instanceName string) {
+ o.InstanceName = instanceName
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetInstanceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param instanceName
+ if err := r.SetPathParam("instanceName", o.InstanceName); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/get_instance_responses.go b/client/instances/get_instance_responses.go
new file mode 100644
index 00000000..abdbc131
--- /dev/null
+++ b/client/instances/get_instance_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetInstanceReader is a Reader for the GetInstance structure.
+type GetInstanceReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetInstanceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetInstanceOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetInstanceDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetInstanceOK creates a GetInstanceOK with default headers values
+func NewGetInstanceOK() *GetInstanceOK {
+ return &GetInstanceOK{}
+}
+
+/*
+GetInstanceOK describes a response with status code 200, with default header values.
+
+Instance
+*/
+type GetInstanceOK struct {
+ Payload garm_params.Instance
+}
+
+// IsSuccess returns true when this get instance o k response has a 2xx status code
+func (o *GetInstanceOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get instance o k response has a 3xx status code
+func (o *GetInstanceOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get instance o k response has a 4xx status code
+func (o *GetInstanceOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get instance o k response has a 5xx status code
+func (o *GetInstanceOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get instance o k response has a status code equal to that given
+func (o *GetInstanceOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get instance o k response
+func (o *GetInstanceOK) Code() int {
+ return 200
+}
+
+func (o *GetInstanceOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %s", 200, payload)
+}
+
+func (o *GetInstanceOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] getInstanceOK %s", 200, payload)
+}
+
+func (o *GetInstanceOK) GetPayload() garm_params.Instance {
+ return o.Payload
+}
+
+func (o *GetInstanceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetInstanceDefault creates a GetInstanceDefault with default headers values
+func NewGetInstanceDefault(code int) *GetInstanceDefault {
+ return &GetInstanceDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetInstanceDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetInstanceDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get instance default response has a 2xx status code
+func (o *GetInstanceDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get instance default response has a 3xx status code
+func (o *GetInstanceDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get instance default response has a 4xx status code
+func (o *GetInstanceDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get instance default response has a 5xx status code
+func (o *GetInstanceDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get instance default response has a status code equal to that given
+func (o *GetInstanceDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get instance default response
+func (o *GetInstanceDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetInstanceDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %s", o._statusCode, payload)
+}
+
+func (o *GetInstanceDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances/{instanceName}][%d] GetInstance default %s", o._statusCode, payload)
+}
+
+func (o *GetInstanceDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetInstanceDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/instances/instances_client.go b/client/instances/instances_client.go
new file mode 100644
index 00000000..2c41f919
--- /dev/null
+++ b/client/instances/instances_client.go
@@ -0,0 +1,257 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new instances API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new instances API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new instances API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for instances API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteInstance(params *DeleteInstanceParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetInstance(params *GetInstanceParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetInstanceOK, error)
+
+ ListInstances(params *ListInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListInstancesOK, error)
+
+ ListPoolInstances(params *ListPoolInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListPoolInstancesOK, error)
+
+ ListScaleSetInstances(params *ListScaleSetInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScaleSetInstancesOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteInstance deletes runner instance by name
+*/
+func (a *Client) DeleteInstance(params *DeleteInstanceParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteInstanceParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteInstance",
+ Method: "DELETE",
+ PathPattern: "/instances/{instanceName}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteInstanceReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetInstance gets runner instance by name
+*/
+func (a *Client) GetInstance(params *GetInstanceParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetInstanceOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetInstanceParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetInstance",
+ Method: "GET",
+ PathPattern: "/instances/{instanceName}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetInstanceReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetInstanceOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetInstanceDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListInstances gets all runners instances
+*/
+func (a *Client) ListInstances(params *ListInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListInstances",
+ Method: "GET",
+ PathPattern: "/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListPoolInstances lists runner instances in a pool
+*/
+func (a *Client) ListPoolInstances(params *ListPoolInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListPoolInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListPoolInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListPoolInstances",
+ Method: "GET",
+ PathPattern: "/pools/{poolID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListPoolInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListPoolInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListPoolInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListScaleSetInstances lists runner instances in a scale set
+*/
+func (a *Client) ListScaleSetInstances(params *ListScaleSetInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScaleSetInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListScaleSetInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListScaleSetInstances",
+ Method: "GET",
+ PathPattern: "/scalesets/{scalesetID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListScaleSetInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListScaleSetInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListScaleSetInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/instances/list_instances_parameters.go b/client/instances/list_instances_parameters.go
new file mode 100644
index 00000000..1c84ec1f
--- /dev/null
+++ b/client/instances/list_instances_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListInstancesParams creates a new ListInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListInstancesParams() *ListInstancesParams {
+ return &ListInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListInstancesParamsWithTimeout creates a new ListInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListInstancesParamsWithTimeout(timeout time.Duration) *ListInstancesParams {
+ return &ListInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListInstancesParamsWithContext creates a new ListInstancesParams object
+// with the ability to set a context for a request.
+func NewListInstancesParamsWithContext(ctx context.Context) *ListInstancesParams {
+ return &ListInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListInstancesParamsWithHTTPClient creates a new ListInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListInstancesParamsWithHTTPClient(client *http.Client) *ListInstancesParams {
+ return &ListInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListInstancesParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListInstancesParams) WithDefaults() *ListInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list instances params
+func (o *ListInstancesParams) WithTimeout(timeout time.Duration) *ListInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list instances params
+func (o *ListInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list instances params
+func (o *ListInstancesParams) WithContext(ctx context.Context) *ListInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list instances params
+func (o *ListInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list instances params
+func (o *ListInstancesParams) WithHTTPClient(client *http.Client) *ListInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list instances params
+func (o *ListInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/list_instances_responses.go b/client/instances/list_instances_responses.go
new file mode 100644
index 00000000..c81d3cf6
--- /dev/null
+++ b/client/instances/list_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListInstancesReader is a Reader for the ListInstances structure.
+type ListInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListInstancesOK creates a ListInstancesOK with default headers values
+func NewListInstancesOK() *ListInstancesOK {
+ return &ListInstancesOK{}
+}
+
+/*
+ListInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list instances o k response has a 2xx status code
+func (o *ListInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list instances o k response has a 3xx status code
+func (o *ListInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list instances o k response has a 4xx status code
+func (o *ListInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list instances o k response has a 5xx status code
+func (o *ListInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list instances o k response a status code equal to that given
+func (o *ListInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list instances o k response
+func (o *ListInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] listInstancesOK %s", 200, payload)
+}
+
+func (o *ListInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] listInstancesOK %s", 200, payload)
+}
+
+func (o *ListInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListInstancesDefault creates a ListInstancesDefault with default headers values
+func NewListInstancesDefault(code int) *ListInstancesDefault {
+ return &ListInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list instances default response has a 2xx status code
+func (o *ListInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list instances default response has a 3xx status code
+func (o *ListInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list instances default response has a 4xx status code
+func (o *ListInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list instances default response has a 5xx status code
+func (o *ListInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list instances default response a status code equal to that given
+func (o *ListInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list instances default response
+func (o *ListInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] ListInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /instances][%d] ListInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/instances/list_pool_instances_parameters.go b/client/instances/list_pool_instances_parameters.go
new file mode 100644
index 00000000..622010de
--- /dev/null
+++ b/client/instances/list_pool_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListPoolInstancesParams creates a new ListPoolInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListPoolInstancesParams() *ListPoolInstancesParams {
+ return &ListPoolInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListPoolInstancesParamsWithTimeout creates a new ListPoolInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListPoolInstancesParamsWithTimeout(timeout time.Duration) *ListPoolInstancesParams {
+ return &ListPoolInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListPoolInstancesParamsWithContext creates a new ListPoolInstancesParams object
+// with the ability to set a context for a request.
+func NewListPoolInstancesParamsWithContext(ctx context.Context) *ListPoolInstancesParams {
+ return &ListPoolInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListPoolInstancesParamsWithHTTPClient creates a new ListPoolInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListPoolInstancesParamsWithHTTPClient(client *http.Client) *ListPoolInstancesParams {
+ return &ListPoolInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListPoolInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list pool instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListPoolInstancesParams struct {
+
+ /* PoolID.
+
+ Runner pool ID.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list pool instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListPoolInstancesParams) WithDefaults() *ListPoolInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list pool instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListPoolInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list pool instances params
+func (o *ListPoolInstancesParams) WithTimeout(timeout time.Duration) *ListPoolInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list pool instances params
+func (o *ListPoolInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list pool instances params
+func (o *ListPoolInstancesParams) WithContext(ctx context.Context) *ListPoolInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list pool instances params
+func (o *ListPoolInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list pool instances params
+func (o *ListPoolInstancesParams) WithHTTPClient(client *http.Client) *ListPoolInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list pool instances params
+func (o *ListPoolInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPoolID adds the poolID to the list pool instances params
+func (o *ListPoolInstancesParams) WithPoolID(poolID string) *ListPoolInstancesParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the list pool instances params
+func (o *ListPoolInstancesParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListPoolInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/list_pool_instances_responses.go b/client/instances/list_pool_instances_responses.go
new file mode 100644
index 00000000..22e8d313
--- /dev/null
+++ b/client/instances/list_pool_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListPoolInstancesReader is a Reader for the ListPoolInstances structure.
+type ListPoolInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListPoolInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListPoolInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListPoolInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListPoolInstancesOK creates a ListPoolInstancesOK with default headers values
+func NewListPoolInstancesOK() *ListPoolInstancesOK {
+ return &ListPoolInstancesOK{}
+}
+
+/*
+ListPoolInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListPoolInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list pool instances o k response has a 2xx status code
+func (o *ListPoolInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list pool instances o k response has a 3xx status code
+func (o *ListPoolInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list pool instances o k response has a 4xx status code
+func (o *ListPoolInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list pool instances o k response has a 5xx status code
+func (o *ListPoolInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list pool instances o k response a status code equal to that given
+func (o *ListPoolInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list pool instances o k response
+func (o *ListPoolInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListPoolInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %s", 200, payload)
+}
+
+func (o *ListPoolInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] listPoolInstancesOK %s", 200, payload)
+}
+
+func (o *ListPoolInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListPoolInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListPoolInstancesDefault creates a ListPoolInstancesDefault with default headers values
+func NewListPoolInstancesDefault(code int) *ListPoolInstancesDefault {
+ return &ListPoolInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListPoolInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListPoolInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list pool instances default response has a 2xx status code
+func (o *ListPoolInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list pool instances default response has a 3xx status code
+func (o *ListPoolInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list pool instances default response has a 4xx status code
+func (o *ListPoolInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list pool instances default response has a 5xx status code
+func (o *ListPoolInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list pool instances default response a status code equal to that given
+func (o *ListPoolInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list pool instances default response
+func (o *ListPoolInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListPoolInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListPoolInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}/instances][%d] ListPoolInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListPoolInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListPoolInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/instances/list_scale_set_instances_parameters.go b/client/instances/list_scale_set_instances_parameters.go
new file mode 100644
index 00000000..7b38ef82
--- /dev/null
+++ b/client/instances/list_scale_set_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListScaleSetInstancesParams creates a new ListScaleSetInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListScaleSetInstancesParams() *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithTimeout creates a new ListScaleSetInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListScaleSetInstancesParamsWithTimeout(timeout time.Duration) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithContext creates a new ListScaleSetInstancesParams object
+// with the ability to set a context for a request.
+func NewListScaleSetInstancesParamsWithContext(ctx context.Context) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListScaleSetInstancesParamsWithHTTPClient creates a new ListScaleSetInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListScaleSetInstancesParamsWithHTTPClient(client *http.Client) *ListScaleSetInstancesParams {
+ return &ListScaleSetInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListScaleSetInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list scale set instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListScaleSetInstancesParams struct {
+
+ /* ScalesetID.
+
+ Runner scale set ID.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list scale set instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScaleSetInstancesParams) WithDefaults() *ListScaleSetInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list scale set instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScaleSetInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithTimeout(timeout time.Duration) *ListScaleSetInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithContext(ctx context.Context) *ListScaleSetInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithHTTPClient(client *http.Client) *ListScaleSetInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the list scale set instances params
+func (o *ListScaleSetInstancesParams) WithScalesetID(scalesetID string) *ListScaleSetInstancesParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the list scale set instances params
+func (o *ListScaleSetInstancesParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListScaleSetInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/instances/list_scale_set_instances_responses.go b/client/instances/list_scale_set_instances_responses.go
new file mode 100644
index 00000000..a966a9e7
--- /dev/null
+++ b/client/instances/list_scale_set_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package instances
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListScaleSetInstancesReader is a Reader for the ListScaleSetInstances structure.
+type ListScaleSetInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListScaleSetInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListScaleSetInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListScaleSetInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListScaleSetInstancesOK creates a ListScaleSetInstancesOK with default headers values
+func NewListScaleSetInstancesOK() *ListScaleSetInstancesOK {
+ return &ListScaleSetInstancesOK{}
+}
+
+/*
+ListScaleSetInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListScaleSetInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list scale set instances o k response has a 2xx status code
+func (o *ListScaleSetInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list scale set instances o k response has a 3xx status code
+func (o *ListScaleSetInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list scale set instances o k response has a 4xx status code
+func (o *ListScaleSetInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list scale set instances o k response has a 5xx status code
+func (o *ListScaleSetInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list scale set instances o k response a status code equal to that given
+func (o *ListScaleSetInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list scale set instances o k response
+func (o *ListScaleSetInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListScaleSetInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] listScaleSetInstancesOK %s", 200, payload)
+}
+
+func (o *ListScaleSetInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] listScaleSetInstancesOK %s", 200, payload)
+}
+
+func (o *ListScaleSetInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListScaleSetInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListScaleSetInstancesDefault creates a ListScaleSetInstancesDefault with default headers values
+func NewListScaleSetInstancesDefault(code int) *ListScaleSetInstancesDefault {
+ return &ListScaleSetInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListScaleSetInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListScaleSetInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list scale set instances default response has a 2xx status code
+func (o *ListScaleSetInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list scale set instances default response has a 3xx status code
+func (o *ListScaleSetInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list scale set instances default response has a 4xx status code
+func (o *ListScaleSetInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list scale set instances default response has a 5xx status code
+func (o *ListScaleSetInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list scale set instances default response a status code equal to that given
+func (o *ListScaleSetInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list scale set instances default response
+func (o *ListScaleSetInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListScaleSetInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] ListScaleSetInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListScaleSetInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}/instances][%d] ListScaleSetInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListScaleSetInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListScaleSetInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/jobs/jobs_client.go b/client/jobs/jobs_client.go
new file mode 100644
index 00000000..0e1e1399
--- /dev/null
+++ b/client/jobs/jobs_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package jobs
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new jobs API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new jobs API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new jobs API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for jobs API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ ListJobs(params *ListJobsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListJobsOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ListJobs lists all jobs
+*/
+func (a *Client) ListJobs(params *ListJobsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListJobsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListJobsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListJobs",
+ Method: "GET",
+ PathPattern: "/jobs",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListJobsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListJobsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ListJobs: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/jobs/list_jobs_parameters.go b/client/jobs/list_jobs_parameters.go
new file mode 100644
index 00000000..9f605015
--- /dev/null
+++ b/client/jobs/list_jobs_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package jobs
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListJobsParams creates a new ListJobsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListJobsParams() *ListJobsParams {
+ return &ListJobsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListJobsParamsWithTimeout creates a new ListJobsParams object
+// with the ability to set a timeout on a request.
+func NewListJobsParamsWithTimeout(timeout time.Duration) *ListJobsParams {
+ return &ListJobsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListJobsParamsWithContext creates a new ListJobsParams object
+// with the ability to set a context for a request.
+func NewListJobsParamsWithContext(ctx context.Context) *ListJobsParams {
+ return &ListJobsParams{
+ Context: ctx,
+ }
+}
+
+// NewListJobsParamsWithHTTPClient creates a new ListJobsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListJobsParamsWithHTTPClient(client *http.Client) *ListJobsParams {
+ return &ListJobsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListJobsParams contains all the parameters to send to the API endpoint
+
+ for the list jobs operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListJobsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list jobs params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListJobsParams) WithDefaults() *ListJobsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list jobs params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListJobsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list jobs params
+func (o *ListJobsParams) WithTimeout(timeout time.Duration) *ListJobsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list jobs params
+func (o *ListJobsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list jobs params
+func (o *ListJobsParams) WithContext(ctx context.Context) *ListJobsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list jobs params
+func (o *ListJobsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list jobs params
+func (o *ListJobsParams) WithHTTPClient(client *http.Client) *ListJobsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list jobs params
+func (o *ListJobsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListJobsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/jobs/list_jobs_responses.go b/client/jobs/list_jobs_responses.go
new file mode 100644
index 00000000..1b8c445a
--- /dev/null
+++ b/client/jobs/list_jobs_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package jobs
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListJobsReader is a Reader for the ListJobs structure.
+type ListJobsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListJobsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListJobsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewListJobsBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /jobs] ListJobs", response, response.Code())
+ }
+}
+
+// NewListJobsOK creates a ListJobsOK with default headers values
+func NewListJobsOK() *ListJobsOK {
+ return &ListJobsOK{}
+}
+
+/*
+ListJobsOK describes a response with status code 200, with default header values.
+
+Jobs
+*/
+type ListJobsOK struct {
+ Payload garm_params.Jobs
+}
+
+// IsSuccess returns true when this list jobs o k response has a 2xx status code
+func (o *ListJobsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list jobs o k response has a 3xx status code
+func (o *ListJobsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list jobs o k response has a 4xx status code
+func (o *ListJobsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list jobs o k response has a 5xx status code
+func (o *ListJobsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list jobs o k response has a status code equal to that given
+func (o *ListJobsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list jobs o k response
+func (o *ListJobsOK) Code() int {
+ return 200
+}
+
+func (o *ListJobsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsOK %s", 200, payload)
+}
+
+func (o *ListJobsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsOK %s", 200, payload)
+}
+
+func (o *ListJobsOK) GetPayload() garm_params.Jobs {
+ return o.Payload
+}
+
+func (o *ListJobsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListJobsBadRequest creates a ListJobsBadRequest with default headers values
+func NewListJobsBadRequest() *ListJobsBadRequest {
+ return &ListJobsBadRequest{}
+}
+
+/*
+ListJobsBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type ListJobsBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list jobs bad request response has a 2xx status code
+func (o *ListJobsBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this list jobs bad request response has a 3xx status code
+func (o *ListJobsBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list jobs bad request response has a 4xx status code
+func (o *ListJobsBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this list jobs bad request response has a 5xx status code
+func (o *ListJobsBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list jobs bad request response has a status code equal to that given
+func (o *ListJobsBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the list jobs bad request response
+func (o *ListJobsBadRequest) Code() int {
+ return 400
+}
+
+func (o *ListJobsBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %s", 400, payload)
+}
+
+func (o *ListJobsBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /jobs][%d] listJobsBadRequest %s", 400, payload)
+}
+
+func (o *ListJobsBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListJobsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/login/login_client.go b/client/login/login_client.go
new file mode 100644
index 00000000..0e9f53de
--- /dev/null
+++ b/client/login/login_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package login
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new login API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new login API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new login API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for login API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ Login(params *LoginParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*LoginOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+Login logs in a user and returns a JWT token
+*/
+func (a *Client) Login(params *LoginParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*LoginOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewLoginParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "Login",
+ Method: "POST",
+ PathPattern: "/auth/login",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &LoginReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*LoginOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for Login: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/login/login_parameters.go b/client/login/login_parameters.go
new file mode 100644
index 00000000..02ea77bc
--- /dev/null
+++ b/client/login/login_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package login
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewLoginParams creates a new LoginParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewLoginParams() *LoginParams {
+ return &LoginParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewLoginParamsWithTimeout creates a new LoginParams object
+// with the ability to set a timeout on a request.
+func NewLoginParamsWithTimeout(timeout time.Duration) *LoginParams {
+ return &LoginParams{
+ timeout: timeout,
+ }
+}
+
+// NewLoginParamsWithContext creates a new LoginParams object
+// with the ability to set a context for a request.
+func NewLoginParamsWithContext(ctx context.Context) *LoginParams {
+ return &LoginParams{
+ Context: ctx,
+ }
+}
+
+// NewLoginParamsWithHTTPClient creates a new LoginParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewLoginParamsWithHTTPClient(client *http.Client) *LoginParams {
+ return &LoginParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+LoginParams contains all the parameters to send to the API endpoint
+
+ for the login operation.
+
+ Typically these are written to a http.Request.
+*/
+type LoginParams struct {
+
+ /* Body.
+
+ Login information.
+ */
+ Body garm_params.PasswordLoginParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the login params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *LoginParams) WithDefaults() *LoginParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the login params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *LoginParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the login params
+func (o *LoginParams) WithTimeout(timeout time.Duration) *LoginParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the login params
+func (o *LoginParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the login params
+func (o *LoginParams) WithContext(ctx context.Context) *LoginParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the login params
+func (o *LoginParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the login params
+func (o *LoginParams) WithHTTPClient(client *http.Client) *LoginParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the login params
+func (o *LoginParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the login params
+func (o *LoginParams) WithBody(body garm_params.PasswordLoginParams) *LoginParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the login params
+func (o *LoginParams) SetBody(body garm_params.PasswordLoginParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *LoginParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/login/login_responses.go b/client/login/login_responses.go
new file mode 100644
index 00000000..7aae2a69
--- /dev/null
+++ b/client/login/login_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package login
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// LoginReader is a Reader for the Login structure.
+type LoginReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *LoginReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewLoginOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewLoginBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[POST /auth/login] Login", response, response.Code())
+ }
+}
+
+// NewLoginOK creates a LoginOK with default headers values
+func NewLoginOK() *LoginOK {
+ return &LoginOK{}
+}
+
+/*
+LoginOK describes a response with status code 200, with default header values.
+
+JWTResponse
+*/
+type LoginOK struct {
+ Payload garm_params.JWTResponse
+}
+
+// IsSuccess returns true when this login o k response has a 2xx status code
+func (o *LoginOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this login o k response has a 3xx status code
+func (o *LoginOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this login o k response has a 4xx status code
+func (o *LoginOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this login o k response has a 5xx status code
+func (o *LoginOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this login o k response has a status code equal to that given
+func (o *LoginOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the login o k response
+func (o *LoginOK) Code() int {
+ return 200
+}
+
+func (o *LoginOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginOK %s", 200, payload)
+}
+
+func (o *LoginOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginOK %s", 200, payload)
+}
+
+func (o *LoginOK) GetPayload() garm_params.JWTResponse {
+ return o.Payload
+}
+
+func (o *LoginOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewLoginBadRequest creates a LoginBadRequest with default headers values
+func NewLoginBadRequest() *LoginBadRequest {
+ return &LoginBadRequest{}
+}
+
+/*
+LoginBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type LoginBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this login bad request response has a 2xx status code
+func (o *LoginBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this login bad request response has a 3xx status code
+func (o *LoginBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this login bad request response has a 4xx status code
+func (o *LoginBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this login bad request response has a 5xx status code
+func (o *LoginBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this login bad request response has a status code equal to that given
+func (o *LoginBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the login bad request response
+func (o *LoginBadRequest) Code() int {
+ return 400
+}
+
+func (o *LoginBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %s", 400, payload)
+}
+
+func (o *LoginBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /auth/login][%d] loginBadRequest %s", 400, payload)
+}
+
+func (o *LoginBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *LoginBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/metrics_token/get_metrics_token_parameters.go b/client/metrics_token/get_metrics_token_parameters.go
new file mode 100644
index 00000000..5099e113
--- /dev/null
+++ b/client/metrics_token/get_metrics_token_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package metrics_token
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetMetricsTokenParams creates a new GetMetricsTokenParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetMetricsTokenParams() *GetMetricsTokenParams {
+ return &GetMetricsTokenParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetMetricsTokenParamsWithTimeout creates a new GetMetricsTokenParams object
+// with the ability to set a timeout on a request.
+func NewGetMetricsTokenParamsWithTimeout(timeout time.Duration) *GetMetricsTokenParams {
+ return &GetMetricsTokenParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetMetricsTokenParamsWithContext creates a new GetMetricsTokenParams object
+// with the ability to set a context for a request.
+func NewGetMetricsTokenParamsWithContext(ctx context.Context) *GetMetricsTokenParams {
+ return &GetMetricsTokenParams{
+ Context: ctx,
+ }
+}
+
+// NewGetMetricsTokenParamsWithHTTPClient creates a new GetMetricsTokenParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetMetricsTokenParamsWithHTTPClient(client *http.Client) *GetMetricsTokenParams {
+ return &GetMetricsTokenParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetMetricsTokenParams contains all the parameters to send to the API endpoint
+
+ for the get metrics token operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetMetricsTokenParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get metrics token params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMetricsTokenParams) WithDefaults() *GetMetricsTokenParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get metrics token params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMetricsTokenParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get metrics token params
+func (o *GetMetricsTokenParams) WithTimeout(timeout time.Duration) *GetMetricsTokenParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get metrics token params
+func (o *GetMetricsTokenParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get metrics token params
+func (o *GetMetricsTokenParams) WithContext(ctx context.Context) *GetMetricsTokenParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get metrics token params
+func (o *GetMetricsTokenParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get metrics token params
+func (o *GetMetricsTokenParams) WithHTTPClient(client *http.Client) *GetMetricsTokenParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get metrics token params
+func (o *GetMetricsTokenParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetMetricsTokenParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/metrics_token/get_metrics_token_responses.go b/client/metrics_token/get_metrics_token_responses.go
new file mode 100644
index 00000000..ea371cc4
--- /dev/null
+++ b/client/metrics_token/get_metrics_token_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package metrics_token
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetMetricsTokenReader is a Reader for the GetMetricsToken structure.
+type GetMetricsTokenReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetMetricsTokenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetMetricsTokenOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 401:
+ result := NewGetMetricsTokenUnauthorized()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /metrics-token] GetMetricsToken", response, response.Code())
+ }
+}
+
+// NewGetMetricsTokenOK creates a GetMetricsTokenOK with default headers values
+func NewGetMetricsTokenOK() *GetMetricsTokenOK {
+ return &GetMetricsTokenOK{}
+}
+
+/*
+GetMetricsTokenOK describes a response with status code 200, with default header values.
+
+JWTResponse
+*/
+type GetMetricsTokenOK struct {
+ Payload garm_params.JWTResponse
+}
+
+// IsSuccess returns true when this get metrics token o k response has a 2xx status code
+func (o *GetMetricsTokenOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get metrics token o k response has a 3xx status code
+func (o *GetMetricsTokenOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get metrics token o k response has a 4xx status code
+func (o *GetMetricsTokenOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get metrics token o k response has a 5xx status code
+func (o *GetMetricsTokenOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get metrics token o k response a status code equal to that given
+func (o *GetMetricsTokenOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get metrics token o k response
+func (o *GetMetricsTokenOK) Code() int {
+ return 200
+}
+
+func (o *GetMetricsTokenOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %s", 200, payload)
+}
+
+func (o *GetMetricsTokenOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenOK %s", 200, payload)
+}
+
+func (o *GetMetricsTokenOK) GetPayload() garm_params.JWTResponse {
+ return o.Payload
+}
+
+func (o *GetMetricsTokenOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetMetricsTokenUnauthorized creates a GetMetricsTokenUnauthorized with default headers values
+func NewGetMetricsTokenUnauthorized() *GetMetricsTokenUnauthorized {
+ return &GetMetricsTokenUnauthorized{}
+}
+
+/*
+GetMetricsTokenUnauthorized describes a response with status code 401, with default header values.
+
+APIErrorResponse
+*/
+type GetMetricsTokenUnauthorized struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get metrics token unauthorized response has a 2xx status code
+func (o *GetMetricsTokenUnauthorized) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get metrics token unauthorized response has a 3xx status code
+func (o *GetMetricsTokenUnauthorized) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get metrics token unauthorized response has a 4xx status code
+func (o *GetMetricsTokenUnauthorized) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get metrics token unauthorized response has a 5xx status code
+func (o *GetMetricsTokenUnauthorized) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get metrics token unauthorized response a status code equal to that given
+func (o *GetMetricsTokenUnauthorized) IsCode(code int) bool {
+ return code == 401
+}
+
+// Code gets the status code for the get metrics token unauthorized response
+func (o *GetMetricsTokenUnauthorized) Code() int {
+ return 401
+}
+
+func (o *GetMetricsTokenUnauthorized) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %s", 401, payload)
+}
+
+func (o *GetMetricsTokenUnauthorized) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /metrics-token][%d] getMetricsTokenUnauthorized %s", 401, payload)
+}
+
+func (o *GetMetricsTokenUnauthorized) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetMetricsTokenUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/metrics_token/metrics_token_client.go b/client/metrics_token/metrics_token_client.go
new file mode 100644
index 00000000..d4d7c2d9
--- /dev/null
+++ b/client/metrics_token/metrics_token_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package metrics_token
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new metrics token API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new metrics token API client with basic auth credentials.
+// It takes the following parameters:
+//   - host: http host (github.com).
+//   - basePath: any base path for the API client ("/v1", "/v3").
+//   - scheme: http scheme ("http", "https").
+//   - user: user for basic authentication header.
+//   - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+	transport := httptransport.New(host, basePath, []string{scheme})
+	transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+	return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new metrics token API client with a bearer token for authentication.
+// It takes the following parameters:
+//   - host: http host (github.com).
+//   - basePath: any base path for the API client ("/v1", "/v3").
+//   - scheme: http scheme ("http", "https").
+//   - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+	transport := httptransport.New(host, basePath, []string{scheme})
+	transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+	return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for metrics token API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetMetricsToken(params *GetMetricsTokenParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetMetricsTokenOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetMetricsToken returns a j w t token that can be used to access the metrics endpoint
+*/
+func (a *Client) GetMetricsToken(params *GetMetricsTokenParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetMetricsTokenOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetMetricsTokenParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetMetricsToken",
+ Method: "GET",
+ PathPattern: "/metrics-token",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetMetricsTokenReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetMetricsTokenOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetMetricsToken: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/organizations/create_org_parameters.go b/client/organizations/create_org_parameters.go
new file mode 100644
index 00000000..bf183834
--- /dev/null
+++ b/client/organizations/create_org_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateOrgParams creates a new CreateOrgParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateOrgParams() *CreateOrgParams {
+ return &CreateOrgParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateOrgParamsWithTimeout creates a new CreateOrgParams object
+// with the ability to set a timeout on a request.
+func NewCreateOrgParamsWithTimeout(timeout time.Duration) *CreateOrgParams {
+ return &CreateOrgParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateOrgParamsWithContext creates a new CreateOrgParams object
+// with the ability to set a context for a request.
+func NewCreateOrgParamsWithContext(ctx context.Context) *CreateOrgParams {
+ return &CreateOrgParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateOrgParamsWithHTTPClient creates a new CreateOrgParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateOrgParamsWithHTTPClient(client *http.Client) *CreateOrgParams {
+ return &CreateOrgParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateOrgParams contains all the parameters to send to the API endpoint
+
+ for the create org operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateOrgParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization.
+ */
+ Body garm_params.CreateOrgParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgParams) WithDefaults() *CreateOrgParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create org params
+func (o *CreateOrgParams) WithTimeout(timeout time.Duration) *CreateOrgParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create org params
+func (o *CreateOrgParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create org params
+func (o *CreateOrgParams) WithContext(ctx context.Context) *CreateOrgParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create org params
+func (o *CreateOrgParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create org params
+func (o *CreateOrgParams) WithHTTPClient(client *http.Client) *CreateOrgParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create org params
+func (o *CreateOrgParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create org params
+func (o *CreateOrgParams) WithBody(body garm_params.CreateOrgParams) *CreateOrgParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create org params
+func (o *CreateOrgParams) SetBody(body garm_params.CreateOrgParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateOrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/create_org_pool_parameters.go b/client/organizations/create_org_pool_parameters.go
new file mode 100644
index 00000000..1fc46998
--- /dev/null
+++ b/client/organizations/create_org_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateOrgPoolParams creates a new CreateOrgPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateOrgPoolParams() *CreateOrgPoolParams {
+ return &CreateOrgPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateOrgPoolParamsWithTimeout creates a new CreateOrgPoolParams object
+// with the ability to set a timeout on a request.
+func NewCreateOrgPoolParamsWithTimeout(timeout time.Duration) *CreateOrgPoolParams {
+ return &CreateOrgPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateOrgPoolParamsWithContext creates a new CreateOrgPoolParams object
+// with the ability to set a context for a request.
+func NewCreateOrgPoolParamsWithContext(ctx context.Context) *CreateOrgPoolParams {
+ return &CreateOrgPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateOrgPoolParamsWithHTTPClient creates a new CreateOrgPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateOrgPoolParamsWithHTTPClient(client *http.Client) *CreateOrgPoolParams {
+ return &CreateOrgPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateOrgPoolParams contains all the parameters to send to the API endpoint
+
+ for the create org pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateOrgPoolParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization pool.
+ */
+ Body garm_params.CreatePoolParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgPoolParams) WithDefaults() *CreateOrgPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create org pool params
+func (o *CreateOrgPoolParams) WithTimeout(timeout time.Duration) *CreateOrgPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create org pool params
+func (o *CreateOrgPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create org pool params
+func (o *CreateOrgPoolParams) WithContext(ctx context.Context) *CreateOrgPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create org pool params
+func (o *CreateOrgPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create org pool params
+func (o *CreateOrgPoolParams) WithHTTPClient(client *http.Client) *CreateOrgPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create org pool params
+func (o *CreateOrgPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create org pool params
+func (o *CreateOrgPoolParams) WithBody(body garm_params.CreatePoolParams) *CreateOrgPoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create org pool params
+func (o *CreateOrgPoolParams) SetBody(body garm_params.CreatePoolParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the create org pool params
+func (o *CreateOrgPoolParams) WithOrgID(orgID string) *CreateOrgPoolParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the create org pool params
+func (o *CreateOrgPoolParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateOrgPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/create_org_pool_responses.go b/client/organizations/create_org_pool_responses.go
new file mode 100644
index 00000000..7ebf9a07
--- /dev/null
+++ b/client/organizations/create_org_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateOrgPoolReader is a Reader for the CreateOrgPool structure.
+type CreateOrgPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateOrgPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateOrgPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateOrgPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateOrgPoolOK creates a CreateOrgPoolOK with default headers values
+func NewCreateOrgPoolOK() *CreateOrgPoolOK {
+ return &CreateOrgPoolOK{}
+}
+
+/*
+CreateOrgPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type CreateOrgPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this create org pool o k response has a 2xx status code
+func (o *CreateOrgPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create org pool o k response has a 3xx status code
+func (o *CreateOrgPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create org pool o k response has a 4xx status code
+func (o *CreateOrgPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create org pool o k response has a 5xx status code
+func (o *CreateOrgPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create org pool o k response a status code equal to that given
+func (o *CreateOrgPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create org pool o k response
+func (o *CreateOrgPoolOK) Code() int {
+ return 200
+}
+
+func (o *CreateOrgPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %s", 200, payload)
+}
+
+func (o *CreateOrgPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] createOrgPoolOK %s", 200, payload)
+}
+
+func (o *CreateOrgPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *CreateOrgPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateOrgPoolDefault creates a CreateOrgPoolDefault with default headers values
+func NewCreateOrgPoolDefault(code int) *CreateOrgPoolDefault {
+ return &CreateOrgPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateOrgPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateOrgPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create org pool default response has a 2xx status code
+func (o *CreateOrgPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create org pool default response has a 3xx status code
+func (o *CreateOrgPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create org pool default response has a 4xx status code
+func (o *CreateOrgPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create org pool default response has a 5xx status code
+func (o *CreateOrgPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create org pool default response a status code equal to that given
+func (o *CreateOrgPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create org pool default response
+func (o *CreateOrgPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateOrgPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/pools][%d] CreateOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateOrgPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/create_org_responses.go b/client/organizations/create_org_responses.go
new file mode 100644
index 00000000..e960e253
--- /dev/null
+++ b/client/organizations/create_org_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateOrgReader is a Reader for the CreateOrg structure.
+type CreateOrgReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateOrgReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateOrgOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateOrgDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateOrgOK creates a CreateOrgOK with default headers values
+func NewCreateOrgOK() *CreateOrgOK {
+ return &CreateOrgOK{}
+}
+
+/*
+CreateOrgOK describes a response with status code 200, with default header values.
+
+Organization
+*/
+type CreateOrgOK struct {
+ Payload garm_params.Organization
+}
+
+// IsSuccess returns true when this create org o k response has a 2xx status code
+func (o *CreateOrgOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create org o k response has a 3xx status code
+func (o *CreateOrgOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create org o k response has a 4xx status code
+func (o *CreateOrgOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create org o k response has a 5xx status code
+func (o *CreateOrgOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create org o k response a status code equal to that given
+func (o *CreateOrgOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create org o k response
+func (o *CreateOrgOK) Code() int {
+ return 200
+}
+
+func (o *CreateOrgOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] createOrgOK %s", 200, payload)
+}
+
+func (o *CreateOrgOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] createOrgOK %s", 200, payload)
+}
+
+func (o *CreateOrgOK) GetPayload() garm_params.Organization {
+ return o.Payload
+}
+
+func (o *CreateOrgOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateOrgDefault creates a CreateOrgDefault with default headers values
+func NewCreateOrgDefault(code int) *CreateOrgDefault {
+ return &CreateOrgDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateOrgDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateOrgDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create org default response has a 2xx status code
+func (o *CreateOrgDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create org default response has a 3xx status code
+func (o *CreateOrgDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create org default response has a 4xx status code
+func (o *CreateOrgDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create org default response has a 5xx status code
+func (o *CreateOrgDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create org default response a status code equal to that given
+func (o *CreateOrgDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create org default response
+func (o *CreateOrgDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateOrgDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations][%d] CreateOrg default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateOrgDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/create_org_scale_set_parameters.go b/client/organizations/create_org_scale_set_parameters.go
new file mode 100644
index 00000000..0e222693
--- /dev/null
+++ b/client/organizations/create_org_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateOrgScaleSetParams creates a new CreateOrgScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateOrgScaleSetParams() *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithTimeout creates a new CreateOrgScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateOrgScaleSetParamsWithTimeout(timeout time.Duration) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithContext creates a new CreateOrgScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateOrgScaleSetParamsWithContext(ctx context.Context) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateOrgScaleSetParamsWithHTTPClient creates a new CreateOrgScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateOrgScaleSetParamsWithHTTPClient(client *http.Client) *CreateOrgScaleSetParams {
+ return &CreateOrgScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateOrgScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create org scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateOrgScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create org scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgScaleSetParams) WithDefaults() *CreateOrgScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create org scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateOrgScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithTimeout(timeout time.Duration) *CreateOrgScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithContext(ctx context.Context) *CreateOrgScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithHTTPClient(client *http.Client) *CreateOrgScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateOrgScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the create org scale set params
+func (o *CreateOrgScaleSetParams) WithOrgID(orgID string) *CreateOrgScaleSetParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the create org scale set params
+func (o *CreateOrgScaleSetParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateOrgScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/create_org_scale_set_responses.go b/client/organizations/create_org_scale_set_responses.go
new file mode 100644
index 00000000..3a91d03f
--- /dev/null
+++ b/client/organizations/create_org_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateOrgScaleSetReader is a Reader for the CreateOrgScaleSet structure.
+type CreateOrgScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateOrgScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateOrgScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateOrgScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateOrgScaleSetOK creates a CreateOrgScaleSetOK with default headers values
+func NewCreateOrgScaleSetOK() *CreateOrgScaleSetOK {
+ return &CreateOrgScaleSetOK{}
+}
+
+/*
+CreateOrgScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateOrgScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create org scale set o k response has a 2xx status code
+func (o *CreateOrgScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create org scale set o k response has a 3xx status code
+func (o *CreateOrgScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create org scale set o k response has a 4xx status code
+func (o *CreateOrgScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create org scale set o k response has a 5xx status code
+func (o *CreateOrgScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create org scale set o k response a status code equal to that given
+func (o *CreateOrgScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create org scale set o k response
+func (o *CreateOrgScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateOrgScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] createOrgScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateOrgScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] createOrgScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateOrgScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateOrgScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateOrgScaleSetDefault creates a CreateOrgScaleSetDefault with default headers values
+func NewCreateOrgScaleSetDefault(code int) *CreateOrgScaleSetDefault {
+ return &CreateOrgScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateOrgScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateOrgScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create org scale set default response has a 2xx status code
+func (o *CreateOrgScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create org scale set default response has a 3xx status code
+func (o *CreateOrgScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create org scale set default response has a 4xx status code
+func (o *CreateOrgScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create org scale set default response has a 5xx status code
+func (o *CreateOrgScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create org scale set default response a status code equal to that given
+func (o *CreateOrgScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create org scale set default response
+func (o *CreateOrgScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateOrgScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] CreateOrgScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/scalesets][%d] CreateOrgScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateOrgScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateOrgScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/delete_org_parameters.go b/client/organizations/delete_org_parameters.go
new file mode 100644
index 00000000..daf36813
--- /dev/null
+++ b/client/organizations/delete_org_parameters.go
@@ -0,0 +1,186 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteOrgParams creates a new DeleteOrgParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteOrgParams() *DeleteOrgParams {
+ return &DeleteOrgParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteOrgParamsWithTimeout creates a new DeleteOrgParams object
+// with the ability to set a timeout on a request.
+func NewDeleteOrgParamsWithTimeout(timeout time.Duration) *DeleteOrgParams {
+ return &DeleteOrgParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteOrgParamsWithContext creates a new DeleteOrgParams object
+// with the ability to set a context for a request.
+func NewDeleteOrgParamsWithContext(ctx context.Context) *DeleteOrgParams {
+ return &DeleteOrgParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteOrgParamsWithHTTPClient creates a new DeleteOrgParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteOrgParamsWithHTTPClient(client *http.Client) *DeleteOrgParams {
+ return &DeleteOrgParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteOrgParams contains all the parameters to send to the API endpoint
+
+ for the delete org operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteOrgParams struct {
+
+ /* KeepWebhook.
+
+ If true and a webhook is installed for this organization, it will not be removed.
+ */
+ KeepWebhook *bool
+
+ /* OrgID.
+
+ ID of the organization to delete.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteOrgParams) WithDefaults() *DeleteOrgParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteOrgParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete org params
+func (o *DeleteOrgParams) WithTimeout(timeout time.Duration) *DeleteOrgParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete org params
+func (o *DeleteOrgParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete org params
+func (o *DeleteOrgParams) WithContext(ctx context.Context) *DeleteOrgParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete org params
+func (o *DeleteOrgParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete org params
+func (o *DeleteOrgParams) WithHTTPClient(client *http.Client) *DeleteOrgParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete org params
+func (o *DeleteOrgParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithKeepWebhook adds the keepWebhook to the delete org params
+func (o *DeleteOrgParams) WithKeepWebhook(keepWebhook *bool) *DeleteOrgParams {
+ o.SetKeepWebhook(keepWebhook)
+ return o
+}
+
+// SetKeepWebhook adds the keepWebhook to the delete org params
+func (o *DeleteOrgParams) SetKeepWebhook(keepWebhook *bool) {
+ o.KeepWebhook = keepWebhook
+}
+
+// WithOrgID adds the orgID to the delete org params
+func (o *DeleteOrgParams) WithOrgID(orgID string) *DeleteOrgParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the delete org params
+func (o *DeleteOrgParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteOrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.KeepWebhook != nil {
+
+ // query param keepWebhook
+ var qrKeepWebhook bool
+
+ if o.KeepWebhook != nil {
+ qrKeepWebhook = *o.KeepWebhook
+ }
+ qKeepWebhook := swag.FormatBool(qrKeepWebhook)
+ if qKeepWebhook != "" {
+
+ if err := r.SetQueryParam("keepWebhook", qKeepWebhook); err != nil {
+ return err
+ }
+ }
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/delete_org_pool_parameters.go b/client/organizations/delete_org_pool_parameters.go
new file mode 100644
index 00000000..89500172
--- /dev/null
+++ b/client/organizations/delete_org_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteOrgPoolParams creates a new DeleteOrgPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteOrgPoolParams() *DeleteOrgPoolParams {
+ return &DeleteOrgPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteOrgPoolParamsWithTimeout creates a new DeleteOrgPoolParams object
+// with the ability to set a timeout on a request.
+func NewDeleteOrgPoolParamsWithTimeout(timeout time.Duration) *DeleteOrgPoolParams {
+ return &DeleteOrgPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteOrgPoolParamsWithContext creates a new DeleteOrgPoolParams object
+// with the ability to set a context for a request.
+func NewDeleteOrgPoolParamsWithContext(ctx context.Context) *DeleteOrgPoolParams {
+ return &DeleteOrgPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteOrgPoolParamsWithHTTPClient creates a new DeleteOrgPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteOrgPoolParamsWithHTTPClient(client *http.Client) *DeleteOrgPoolParams {
+ return &DeleteOrgPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteOrgPoolParams contains all the parameters to send to the API endpoint
+
+ for the delete org pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteOrgPoolParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ /* PoolID.
+
+ ID of the organization pool to delete.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteOrgPoolParams) WithDefaults() *DeleteOrgPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteOrgPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete org pool params
+func (o *DeleteOrgPoolParams) WithTimeout(timeout time.Duration) *DeleteOrgPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete org pool params
+func (o *DeleteOrgPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete org pool params
+func (o *DeleteOrgPoolParams) WithContext(ctx context.Context) *DeleteOrgPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete org pool params
+func (o *DeleteOrgPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete org pool params
+func (o *DeleteOrgPoolParams) WithHTTPClient(client *http.Client) *DeleteOrgPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete org pool params
+func (o *DeleteOrgPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the delete org pool params
+func (o *DeleteOrgPoolParams) WithOrgID(orgID string) *DeleteOrgPoolParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the delete org pool params
+func (o *DeleteOrgPoolParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WithPoolID adds the poolID to the delete org pool params
+func (o *DeleteOrgPoolParams) WithPoolID(poolID string) *DeleteOrgPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the delete org pool params
+func (o *DeleteOrgPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteOrgPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/delete_org_pool_responses.go b/client/organizations/delete_org_pool_responses.go
new file mode 100644
index 00000000..9bca3f30
--- /dev/null
+++ b/client/organizations/delete_org_pool_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteOrgPoolReader is a Reader for the DeleteOrgPool structure.
+type DeleteOrgPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteOrgPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteOrgPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteOrgPoolDefault creates a DeleteOrgPoolDefault with default headers values
+func NewDeleteOrgPoolDefault(code int) *DeleteOrgPoolDefault {
+ return &DeleteOrgPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteOrgPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteOrgPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete org pool default response has a 2xx status code
+func (o *DeleteOrgPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete org pool default response has a 3xx status code
+func (o *DeleteOrgPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete org pool default response has a 4xx status code
+func (o *DeleteOrgPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete org pool default response has a 5xx status code
+func (o *DeleteOrgPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete org pool default response a status code equal to that given
+func (o *DeleteOrgPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete org pool default response
+func (o *DeleteOrgPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteOrgPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteOrgPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/pools/{poolID}][%d] DeleteOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteOrgPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/delete_org_responses.go b/client/organizations/delete_org_responses.go
new file mode 100644
index 00000000..87d4ff19
--- /dev/null
+++ b/client/organizations/delete_org_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteOrgReader is a Reader for the DeleteOrg structure.
+type DeleteOrgReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteOrgReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteOrgDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteOrgDefault creates a DeleteOrgDefault with default headers values
+func NewDeleteOrgDefault(code int) *DeleteOrgDefault {
+ return &DeleteOrgDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteOrgDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteOrgDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete org default response has a 2xx status code
+func (o *DeleteOrgDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete org default response has a 3xx status code
+func (o *DeleteOrgDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete org default response has a 4xx status code
+func (o *DeleteOrgDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete org default response has a 5xx status code
+func (o *DeleteOrgDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete org default response a status code equal to that given
+func (o *DeleteOrgDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete org default response
+func (o *DeleteOrgDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteOrgDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %s", o._statusCode, payload)
+}
+
+func (o *DeleteOrgDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}][%d] DeleteOrg default %s", o._statusCode, payload)
+}
+
+func (o *DeleteOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteOrgDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/get_org_parameters.go b/client/organizations/get_org_parameters.go
new file mode 100644
index 00000000..b5bab9c0
--- /dev/null
+++ b/client/organizations/get_org_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetOrgParams creates a new GetOrgParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetOrgParams() *GetOrgParams {
+ return &GetOrgParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetOrgParamsWithTimeout creates a new GetOrgParams object
+// with the ability to set a timeout on a request.
+func NewGetOrgParamsWithTimeout(timeout time.Duration) *GetOrgParams {
+ return &GetOrgParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetOrgParamsWithContext creates a new GetOrgParams object
+// with the ability to set a context for a request.
+func NewGetOrgParamsWithContext(ctx context.Context) *GetOrgParams {
+ return &GetOrgParams{
+ Context: ctx,
+ }
+}
+
+// NewGetOrgParamsWithHTTPClient creates a new GetOrgParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetOrgParamsWithHTTPClient(client *http.Client) *GetOrgParams {
+ return &GetOrgParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetOrgParams contains all the parameters to send to the API endpoint
+
+ for the get org operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetOrgParams struct {
+
+ /* OrgID.
+
+ ID of the organization to fetch.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgParams) WithDefaults() *GetOrgParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get org params
+func (o *GetOrgParams) WithTimeout(timeout time.Duration) *GetOrgParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get org params
+func (o *GetOrgParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get org params
+func (o *GetOrgParams) WithContext(ctx context.Context) *GetOrgParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get org params
+func (o *GetOrgParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get org params
+func (o *GetOrgParams) WithHTTPClient(client *http.Client) *GetOrgParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get org params
+func (o *GetOrgParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the get org params
+func (o *GetOrgParams) WithOrgID(orgID string) *GetOrgParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the get org params
+func (o *GetOrgParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetOrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/get_org_pool_parameters.go b/client/organizations/get_org_pool_parameters.go
new file mode 100644
index 00000000..dd2dc817
--- /dev/null
+++ b/client/organizations/get_org_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetOrgPoolParams creates a new GetOrgPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetOrgPoolParams() *GetOrgPoolParams {
+ return &GetOrgPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetOrgPoolParamsWithTimeout creates a new GetOrgPoolParams object
+// with the ability to set a timeout on a request.
+func NewGetOrgPoolParamsWithTimeout(timeout time.Duration) *GetOrgPoolParams {
+ return &GetOrgPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetOrgPoolParamsWithContext creates a new GetOrgPoolParams object
+// with the ability to set a context for a request.
+func NewGetOrgPoolParamsWithContext(ctx context.Context) *GetOrgPoolParams {
+ return &GetOrgPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewGetOrgPoolParamsWithHTTPClient creates a new GetOrgPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetOrgPoolParamsWithHTTPClient(client *http.Client) *GetOrgPoolParams {
+ return &GetOrgPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetOrgPoolParams contains all the parameters to send to the API endpoint
+
+ for the get org pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetOrgPoolParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ /* PoolID.
+
+ Pool ID.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgPoolParams) WithDefaults() *GetOrgPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get org pool params
+func (o *GetOrgPoolParams) WithTimeout(timeout time.Duration) *GetOrgPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get org pool params
+func (o *GetOrgPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get org pool params
+func (o *GetOrgPoolParams) WithContext(ctx context.Context) *GetOrgPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get org pool params
+func (o *GetOrgPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get org pool params
+func (o *GetOrgPoolParams) WithHTTPClient(client *http.Client) *GetOrgPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get org pool params
+func (o *GetOrgPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the get org pool params
+func (o *GetOrgPoolParams) WithOrgID(orgID string) *GetOrgPoolParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the get org pool params
+func (o *GetOrgPoolParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WithPoolID adds the poolID to the get org pool params
+func (o *GetOrgPoolParams) WithPoolID(poolID string) *GetOrgPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the get org pool params
+func (o *GetOrgPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetOrgPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/get_org_pool_responses.go b/client/organizations/get_org_pool_responses.go
new file mode 100644
index 00000000..dba3ed27
--- /dev/null
+++ b/client/organizations/get_org_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetOrgPoolReader is a Reader for the GetOrgPool structure.
+type GetOrgPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetOrgPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetOrgPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetOrgPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetOrgPoolOK creates a GetOrgPoolOK with default headers values
+func NewGetOrgPoolOK() *GetOrgPoolOK {
+ return &GetOrgPoolOK{}
+}
+
+/*
+GetOrgPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type GetOrgPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this get org pool o k response has a 2xx status code
+func (o *GetOrgPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get org pool o k response has a 3xx status code
+func (o *GetOrgPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get org pool o k response has a 4xx status code
+func (o *GetOrgPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get org pool o k response has a 5xx status code
+func (o *GetOrgPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get org pool o k response a status code equal to that given
+func (o *GetOrgPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get org pool o k response
+func (o *GetOrgPoolOK) Code() int {
+ return 200
+}
+
+func (o *GetOrgPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %s", 200, payload)
+}
+
+func (o *GetOrgPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] getOrgPoolOK %s", 200, payload)
+}
+
+func (o *GetOrgPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *GetOrgPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetOrgPoolDefault creates a GetOrgPoolDefault with default headers values
+func NewGetOrgPoolDefault(code int) *GetOrgPoolDefault {
+ return &GetOrgPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetOrgPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetOrgPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get org pool default response has a 2xx status code
+func (o *GetOrgPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get org pool default response has a 3xx status code
+func (o *GetOrgPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get org pool default response has a 4xx status code
+func (o *GetOrgPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get org pool default response has a 5xx status code
+func (o *GetOrgPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get org pool default response a status code equal to that given
+func (o *GetOrgPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get org pool default response
+func (o *GetOrgPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetOrgPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools/{poolID}][%d] GetOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetOrgPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/get_org_responses.go b/client/organizations/get_org_responses.go
new file mode 100644
index 00000000..2c6df58d
--- /dev/null
+++ b/client/organizations/get_org_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetOrgReader is a Reader for the GetOrg structure.
+type GetOrgReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetOrgReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetOrgOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetOrgDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetOrgOK creates a GetOrgOK with default headers values
+func NewGetOrgOK() *GetOrgOK {
+ return &GetOrgOK{}
+}
+
+/*
+GetOrgOK describes a response with status code 200, with default header values.
+
+Organization
+*/
+type GetOrgOK struct {
+ Payload garm_params.Organization
+}
+
+// IsSuccess returns true when this get org o k response has a 2xx status code
+func (o *GetOrgOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get org o k response has a 3xx status code
+func (o *GetOrgOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get org o k response has a 4xx status code
+func (o *GetOrgOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get org o k response has a 5xx status code
+func (o *GetOrgOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get org o k response a status code equal to that given
+func (o *GetOrgOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get org o k response
+func (o *GetOrgOK) Code() int {
+ return 200
+}
+
+func (o *GetOrgOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %s", 200, payload)
+}
+
+func (o *GetOrgOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] getOrgOK %s", 200, payload)
+}
+
+func (o *GetOrgOK) GetPayload() garm_params.Organization {
+ return o.Payload
+}
+
+func (o *GetOrgOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetOrgDefault creates a GetOrgDefault with default headers values
+func NewGetOrgDefault(code int) *GetOrgDefault {
+ return &GetOrgDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetOrgDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetOrgDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get org default response has a 2xx status code
+func (o *GetOrgDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get org default response has a 3xx status code
+func (o *GetOrgDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get org default response has a 4xx status code
+func (o *GetOrgDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get org default response has a 5xx status code
+func (o *GetOrgDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get org default response a status code equal to that given
+func (o *GetOrgDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get org default response
+func (o *GetOrgDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetOrgDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}][%d] GetOrg default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetOrgDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/get_org_webhook_info_parameters.go b/client/organizations/get_org_webhook_info_parameters.go
new file mode 100644
index 00000000..fe67c584
--- /dev/null
+++ b/client/organizations/get_org_webhook_info_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetOrgWebhookInfoParams creates a new GetOrgWebhookInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetOrgWebhookInfoParams() *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithTimeout creates a new GetOrgWebhookInfoParams object
+// with the ability to set a timeout on a request.
+func NewGetOrgWebhookInfoParamsWithTimeout(timeout time.Duration) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithContext creates a new GetOrgWebhookInfoParams object
+// with the ability to set a context for a request.
+func NewGetOrgWebhookInfoParamsWithContext(ctx context.Context) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetOrgWebhookInfoParamsWithHTTPClient creates a new GetOrgWebhookInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetOrgWebhookInfoParamsWithHTTPClient(client *http.Client) *GetOrgWebhookInfoParams {
+ return &GetOrgWebhookInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetOrgWebhookInfoParams contains all the parameters to send to the API endpoint
+
+ for the get org webhook info operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetOrgWebhookInfoParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get org webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgWebhookInfoParams) WithDefaults() *GetOrgWebhookInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get org webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetOrgWebhookInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithTimeout(timeout time.Duration) *GetOrgWebhookInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithContext(ctx context.Context) *GetOrgWebhookInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithHTTPClient(client *http.Client) *GetOrgWebhookInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) WithOrgID(orgID string) *GetOrgWebhookInfoParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the get org webhook info params
+func (o *GetOrgWebhookInfoParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetOrgWebhookInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/get_org_webhook_info_responses.go b/client/organizations/get_org_webhook_info_responses.go
new file mode 100644
index 00000000..9cebf511
--- /dev/null
+++ b/client/organizations/get_org_webhook_info_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetOrgWebhookInfoReader is a Reader for the GetOrgWebhookInfo structure.
+type GetOrgWebhookInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetOrgWebhookInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetOrgWebhookInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetOrgWebhookInfoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetOrgWebhookInfoOK creates a GetOrgWebhookInfoOK with default headers values
+func NewGetOrgWebhookInfoOK() *GetOrgWebhookInfoOK {
+ return &GetOrgWebhookInfoOK{}
+}
+
+/*
+GetOrgWebhookInfoOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type GetOrgWebhookInfoOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this get org webhook info o k response has a 2xx status code
+func (o *GetOrgWebhookInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get org webhook info o k response has a 3xx status code
+func (o *GetOrgWebhookInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get org webhook info o k response has a 4xx status code
+func (o *GetOrgWebhookInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get org webhook info o k response has a 5xx status code
+func (o *GetOrgWebhookInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get org webhook info o k response a status code equal to that given
+func (o *GetOrgWebhookInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get org webhook info o k response
+func (o *GetOrgWebhookInfoOK) Code() int {
+ return 200
+}
+
+func (o *GetOrgWebhookInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] getOrgWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetOrgWebhookInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] getOrgWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetOrgWebhookInfoOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *GetOrgWebhookInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetOrgWebhookInfoDefault creates a GetOrgWebhookInfoDefault with default headers values
+func NewGetOrgWebhookInfoDefault(code int) *GetOrgWebhookInfoDefault {
+ return &GetOrgWebhookInfoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetOrgWebhookInfoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetOrgWebhookInfoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get org webhook info default response has a 2xx status code
+func (o *GetOrgWebhookInfoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get org webhook info default response has a 3xx status code
+func (o *GetOrgWebhookInfoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get org webhook info default response has a 4xx status code
+func (o *GetOrgWebhookInfoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get org webhook info default response has a 5xx status code
+func (o *GetOrgWebhookInfoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get org webhook info default response a status code equal to that given
+func (o *GetOrgWebhookInfoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get org webhook info default response
+func (o *GetOrgWebhookInfoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetOrgWebhookInfoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] GetOrgWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgWebhookInfoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/webhook][%d] GetOrgWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetOrgWebhookInfoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetOrgWebhookInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/install_org_webhook_parameters.go b/client/organizations/install_org_webhook_parameters.go
new file mode 100644
index 00000000..b28de742
--- /dev/null
+++ b/client/organizations/install_org_webhook_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewInstallOrgWebhookParams creates a new InstallOrgWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewInstallOrgWebhookParams() *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithTimeout creates a new InstallOrgWebhookParams object
+// with the ability to set a timeout on a request.
+func NewInstallOrgWebhookParamsWithTimeout(timeout time.Duration) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithContext creates a new InstallOrgWebhookParams object
+// with the ability to set a context for a request.
+func NewInstallOrgWebhookParamsWithContext(ctx context.Context) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewInstallOrgWebhookParamsWithHTTPClient creates a new InstallOrgWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewInstallOrgWebhookParamsWithHTTPClient(client *http.Client) *InstallOrgWebhookParams {
+ return &InstallOrgWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+InstallOrgWebhookParams contains all the parameters to send to the API endpoint
+
+ for the install org webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type InstallOrgWebhookParams struct {
+
+ /* Body.
+
+ Parameters used when creating the organization webhook.
+ */
+ Body garm_params.InstallWebhookParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the install org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallOrgWebhookParams) WithDefaults() *InstallOrgWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the install org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallOrgWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the install org webhook params
+func (o *InstallOrgWebhookParams) WithTimeout(timeout time.Duration) *InstallOrgWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the install org webhook params
+func (o *InstallOrgWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the install org webhook params
+func (o *InstallOrgWebhookParams) WithContext(ctx context.Context) *InstallOrgWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the install org webhook params
+func (o *InstallOrgWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the install org webhook params
+func (o *InstallOrgWebhookParams) WithHTTPClient(client *http.Client) *InstallOrgWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the install org webhook params
+func (o *InstallOrgWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the install org webhook params
+func (o *InstallOrgWebhookParams) WithBody(body garm_params.InstallWebhookParams) *InstallOrgWebhookParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the install org webhook params
+func (o *InstallOrgWebhookParams) SetBody(body garm_params.InstallWebhookParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the install org webhook params
+func (o *InstallOrgWebhookParams) WithOrgID(orgID string) *InstallOrgWebhookParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the install org webhook params
+func (o *InstallOrgWebhookParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *InstallOrgWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/install_org_webhook_responses.go b/client/organizations/install_org_webhook_responses.go
new file mode 100644
index 00000000..338b7c08
--- /dev/null
+++ b/client/organizations/install_org_webhook_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// InstallOrgWebhookReader is a Reader for the InstallOrgWebhook structure.
+type InstallOrgWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *InstallOrgWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewInstallOrgWebhookOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewInstallOrgWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewInstallOrgWebhookOK creates a InstallOrgWebhookOK with default headers values
+func NewInstallOrgWebhookOK() *InstallOrgWebhookOK {
+ return &InstallOrgWebhookOK{}
+}
+
+/*
+InstallOrgWebhookOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type InstallOrgWebhookOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this install org webhook o k response has a 2xx status code
+func (o *InstallOrgWebhookOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this install org webhook o k response has a 3xx status code
+func (o *InstallOrgWebhookOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this install org webhook o k response has a 4xx status code
+func (o *InstallOrgWebhookOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this install org webhook o k response has a 5xx status code
+func (o *InstallOrgWebhookOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this install org webhook o k response a status code equal to that given
+func (o *InstallOrgWebhookOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the install org webhook o k response
+func (o *InstallOrgWebhookOK) Code() int {
+ return 200
+}
+
+func (o *InstallOrgWebhookOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] installOrgWebhookOK %s", 200, payload)
+}
+
+func (o *InstallOrgWebhookOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] installOrgWebhookOK %s", 200, payload)
+}
+
+func (o *InstallOrgWebhookOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *InstallOrgWebhookOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewInstallOrgWebhookDefault creates a InstallOrgWebhookDefault with default headers values
+func NewInstallOrgWebhookDefault(code int) *InstallOrgWebhookDefault {
+ return &InstallOrgWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+InstallOrgWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type InstallOrgWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this install org webhook default response has a 2xx status code
+func (o *InstallOrgWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this install org webhook default response has a 3xx status code
+func (o *InstallOrgWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this install org webhook default response has a 4xx status code
+func (o *InstallOrgWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this install org webhook default response has a 5xx status code
+func (o *InstallOrgWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this install org webhook default response a status code equal to that given
+func (o *InstallOrgWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the install org webhook default response
+func (o *InstallOrgWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *InstallOrgWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] InstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallOrgWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /organizations/{orgID}/webhook][%d] InstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallOrgWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *InstallOrgWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/list_org_instances_parameters.go b/client/organizations/list_org_instances_parameters.go
new file mode 100644
index 00000000..e7167a7e
--- /dev/null
+++ b/client/organizations/list_org_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListOrgInstancesParams creates a new ListOrgInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListOrgInstancesParams() *ListOrgInstancesParams {
+ return &ListOrgInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListOrgInstancesParamsWithTimeout creates a new ListOrgInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListOrgInstancesParamsWithTimeout(timeout time.Duration) *ListOrgInstancesParams {
+ return &ListOrgInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListOrgInstancesParamsWithContext creates a new ListOrgInstancesParams object
+// with the ability to set a context for a request.
+func NewListOrgInstancesParamsWithContext(ctx context.Context) *ListOrgInstancesParams {
+ return &ListOrgInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListOrgInstancesParamsWithHTTPClient creates a new ListOrgInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListOrgInstancesParamsWithHTTPClient(client *http.Client) *ListOrgInstancesParams {
+ return &ListOrgInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListOrgInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list org instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListOrgInstancesParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list org instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgInstancesParams) WithDefaults() *ListOrgInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list org instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list org instances params
+func (o *ListOrgInstancesParams) WithTimeout(timeout time.Duration) *ListOrgInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list org instances params
+func (o *ListOrgInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list org instances params
+func (o *ListOrgInstancesParams) WithContext(ctx context.Context) *ListOrgInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list org instances params
+func (o *ListOrgInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list org instances params
+func (o *ListOrgInstancesParams) WithHTTPClient(client *http.Client) *ListOrgInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list org instances params
+func (o *ListOrgInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the list org instances params
+func (o *ListOrgInstancesParams) WithOrgID(orgID string) *ListOrgInstancesParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the list org instances params
+func (o *ListOrgInstancesParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListOrgInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/list_org_instances_responses.go b/client/organizations/list_org_instances_responses.go
new file mode 100644
index 00000000..2c6ad86e
--- /dev/null
+++ b/client/organizations/list_org_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListOrgInstancesReader is a Reader for the ListOrgInstances structure.
+type ListOrgInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListOrgInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListOrgInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListOrgInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListOrgInstancesOK creates a ListOrgInstancesOK with default headers values
+func NewListOrgInstancesOK() *ListOrgInstancesOK {
+ return &ListOrgInstancesOK{}
+}
+
+/*
+ListOrgInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListOrgInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list org instances o k response has a 2xx status code
+func (o *ListOrgInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list org instances o k response has a 3xx status code
+func (o *ListOrgInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list org instances o k response has a 4xx status code
+func (o *ListOrgInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list org instances o k response has a 5xx status code
+func (o *ListOrgInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list org instances o k response a status code equal to that given
+func (o *ListOrgInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list org instances o k response
+func (o *ListOrgInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListOrgInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %s", 200, payload)
+}
+
+func (o *ListOrgInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] listOrgInstancesOK %s", 200, payload)
+}
+
+func (o *ListOrgInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListOrgInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListOrgInstancesDefault creates a ListOrgInstancesDefault with default headers values
+func NewListOrgInstancesDefault(code int) *ListOrgInstancesDefault {
+ return &ListOrgInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListOrgInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListOrgInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list org instances default response has a 2xx status code
+func (o *ListOrgInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list org instances default response has a 3xx status code
+func (o *ListOrgInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list org instances default response has a 4xx status code
+func (o *ListOrgInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list org instances default response has a 5xx status code
+func (o *ListOrgInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list org instances default response a status code equal to that given
+func (o *ListOrgInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list org instances default response
+func (o *ListOrgInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListOrgInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/instances][%d] ListOrgInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListOrgInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/list_org_pools_parameters.go b/client/organizations/list_org_pools_parameters.go
new file mode 100644
index 00000000..15a4284e
--- /dev/null
+++ b/client/organizations/list_org_pools_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListOrgPoolsParams creates a new ListOrgPoolsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListOrgPoolsParams() *ListOrgPoolsParams {
+ return &ListOrgPoolsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListOrgPoolsParamsWithTimeout creates a new ListOrgPoolsParams object
+// with the ability to set a timeout on a request.
+func NewListOrgPoolsParamsWithTimeout(timeout time.Duration) *ListOrgPoolsParams {
+ return &ListOrgPoolsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListOrgPoolsParamsWithContext creates a new ListOrgPoolsParams object
+// with the ability to set a context for a request.
+func NewListOrgPoolsParamsWithContext(ctx context.Context) *ListOrgPoolsParams {
+ return &ListOrgPoolsParams{
+ Context: ctx,
+ }
+}
+
+// NewListOrgPoolsParamsWithHTTPClient creates a new ListOrgPoolsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListOrgPoolsParamsWithHTTPClient(client *http.Client) *ListOrgPoolsParams {
+ return &ListOrgPoolsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListOrgPoolsParams contains all the parameters to send to the API endpoint
+
+ for the list org pools operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListOrgPoolsParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list org pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgPoolsParams) WithDefaults() *ListOrgPoolsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list org pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgPoolsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list org pools params
+func (o *ListOrgPoolsParams) WithTimeout(timeout time.Duration) *ListOrgPoolsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list org pools params
+func (o *ListOrgPoolsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list org pools params
+func (o *ListOrgPoolsParams) WithContext(ctx context.Context) *ListOrgPoolsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list org pools params
+func (o *ListOrgPoolsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list org pools params
+func (o *ListOrgPoolsParams) WithHTTPClient(client *http.Client) *ListOrgPoolsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list org pools params
+func (o *ListOrgPoolsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the list org pools params
+func (o *ListOrgPoolsParams) WithOrgID(orgID string) *ListOrgPoolsParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the list org pools params
+func (o *ListOrgPoolsParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListOrgPoolsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/list_org_pools_responses.go b/client/organizations/list_org_pools_responses.go
new file mode 100644
index 00000000..cdbe7f7b
--- /dev/null
+++ b/client/organizations/list_org_pools_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListOrgPoolsReader is a Reader for the ListOrgPools structure.
+type ListOrgPoolsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListOrgPoolsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListOrgPoolsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListOrgPoolsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListOrgPoolsOK creates a ListOrgPoolsOK with default headers values
+func NewListOrgPoolsOK() *ListOrgPoolsOK {
+ return &ListOrgPoolsOK{}
+}
+
+/*
+ListOrgPoolsOK describes a response with status code 200, with default header values.
+
+Pools
+*/
+type ListOrgPoolsOK struct {
+ Payload garm_params.Pools
+}
+
+// IsSuccess returns true when this list org pools o k response has a 2xx status code
+func (o *ListOrgPoolsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list org pools o k response has a 3xx status code
+func (o *ListOrgPoolsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list org pools o k response has a 4xx status code
+func (o *ListOrgPoolsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list org pools o k response has a 5xx status code
+func (o *ListOrgPoolsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list org pools o k response a status code equal to that given
+func (o *ListOrgPoolsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list org pools o k response
+func (o *ListOrgPoolsOK) Code() int {
+ return 200
+}
+
+func (o *ListOrgPoolsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %s", 200, payload)
+}
+
+func (o *ListOrgPoolsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] listOrgPoolsOK %s", 200, payload)
+}
+
+func (o *ListOrgPoolsOK) GetPayload() garm_params.Pools {
+ return o.Payload
+}
+
+func (o *ListOrgPoolsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListOrgPoolsDefault creates a ListOrgPoolsDefault with default headers values
+func NewListOrgPoolsDefault(code int) *ListOrgPoolsDefault {
+ return &ListOrgPoolsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListOrgPoolsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListOrgPoolsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list org pools default response has a 2xx status code
+func (o *ListOrgPoolsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list org pools default response has a 3xx status code
+func (o *ListOrgPoolsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list org pools default response has a 4xx status code
+func (o *ListOrgPoolsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list org pools default response has a 5xx status code
+func (o *ListOrgPoolsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list org pools default response a status code equal to that given
+func (o *ListOrgPoolsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list org pools default response
+func (o *ListOrgPoolsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListOrgPoolsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgPoolsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/pools][%d] ListOrgPools default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListOrgPoolsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/list_org_scale_sets_parameters.go b/client/organizations/list_org_scale_sets_parameters.go
new file mode 100644
index 00000000..711ec788
--- /dev/null
+++ b/client/organizations/list_org_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListOrgScaleSetsParams creates a new ListOrgScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListOrgScaleSetsParams() *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithTimeout creates a new ListOrgScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListOrgScaleSetsParamsWithTimeout(timeout time.Duration) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithContext creates a new ListOrgScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListOrgScaleSetsParamsWithContext(ctx context.Context) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListOrgScaleSetsParamsWithHTTPClient creates a new ListOrgScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListOrgScaleSetsParamsWithHTTPClient(client *http.Client) *ListOrgScaleSetsParams {
+ return &ListOrgScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListOrgScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list org scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListOrgScaleSetsParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list org scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgScaleSetsParams) WithDefaults() *ListOrgScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list org scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithTimeout(timeout time.Duration) *ListOrgScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithContext(ctx context.Context) *ListOrgScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithHTTPClient(client *http.Client) *ListOrgScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the list org scale sets params
+func (o *ListOrgScaleSetsParams) WithOrgID(orgID string) *ListOrgScaleSetsParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the list org scale sets params
+func (o *ListOrgScaleSetsParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListOrgScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/list_org_scale_sets_responses.go b/client/organizations/list_org_scale_sets_responses.go
new file mode 100644
index 00000000..0b470fa1
--- /dev/null
+++ b/client/organizations/list_org_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListOrgScaleSetsReader is a Reader for the ListOrgScaleSets structure.
+type ListOrgScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListOrgScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListOrgScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListOrgScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListOrgScaleSetsOK creates a ListOrgScaleSetsOK with default headers values
+func NewListOrgScaleSetsOK() *ListOrgScaleSetsOK {
+ return &ListOrgScaleSetsOK{}
+}
+
+/*
+ListOrgScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListOrgScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list org scale sets o k response has a 2xx status code
+func (o *ListOrgScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list org scale sets o k response has a 3xx status code
+func (o *ListOrgScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list org scale sets o k response has a 4xx status code
+func (o *ListOrgScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list org scale sets o k response has a 5xx status code
+func (o *ListOrgScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list org scale sets o k response has a status code equal to that given
+func (o *ListOrgScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list org scale sets o k response
+func (o *ListOrgScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListOrgScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] listOrgScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListOrgScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] listOrgScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListOrgScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListOrgScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListOrgScaleSetsDefault creates a ListOrgScaleSetsDefault with default headers values
+func NewListOrgScaleSetsDefault(code int) *ListOrgScaleSetsDefault {
+ return &ListOrgScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListOrgScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListOrgScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list org scale sets default response has a 2xx status code
+func (o *ListOrgScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list org scale sets default response has a 3xx status code
+func (o *ListOrgScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list org scale sets default response has a 4xx status code
+func (o *ListOrgScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list org scale sets default response has a 5xx status code
+func (o *ListOrgScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list org scale sets default response has a status code equal to that given
+func (o *ListOrgScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list org scale sets default response
+func (o *ListOrgScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListOrgScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] ListOrgScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations/{orgID}/scalesets][%d] ListOrgScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListOrgScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/list_orgs_parameters.go b/client/organizations/list_orgs_parameters.go
new file mode 100644
index 00000000..af4c19c8
--- /dev/null
+++ b/client/organizations/list_orgs_parameters.go
@@ -0,0 +1,197 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListOrgsParams creates a new ListOrgsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListOrgsParams() *ListOrgsParams {
+ return &ListOrgsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListOrgsParamsWithTimeout creates a new ListOrgsParams object
+// with the ability to set a timeout on a request.
+func NewListOrgsParamsWithTimeout(timeout time.Duration) *ListOrgsParams {
+ return &ListOrgsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListOrgsParamsWithContext creates a new ListOrgsParams object
+// with the ability to set a context for a request.
+func NewListOrgsParamsWithContext(ctx context.Context) *ListOrgsParams {
+ return &ListOrgsParams{
+ Context: ctx,
+ }
+}
+
+// NewListOrgsParamsWithHTTPClient creates a new ListOrgsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListOrgsParamsWithHTTPClient(client *http.Client) *ListOrgsParams {
+ return &ListOrgsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListOrgsParams contains all the parameters to send to the API endpoint
+
+ for the list orgs operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListOrgsParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact organization name to filter by
+ */
+ Name *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list orgs params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgsParams) WithDefaults() *ListOrgsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list orgs params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListOrgsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list orgs params
+func (o *ListOrgsParams) WithTimeout(timeout time.Duration) *ListOrgsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list orgs params
+func (o *ListOrgsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list orgs params
+func (o *ListOrgsParams) WithContext(ctx context.Context) *ListOrgsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list orgs params
+func (o *ListOrgsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list orgs params
+func (o *ListOrgsParams) WithHTTPClient(client *http.Client) *ListOrgsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list orgs params
+func (o *ListOrgsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the list orgs params
+func (o *ListOrgsParams) WithEndpoint(endpoint *string) *ListOrgsParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list orgs params
+func (o *ListOrgsParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list orgs params
+func (o *ListOrgsParams) WithName(name *string) *ListOrgsParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list orgs params
+func (o *ListOrgsParams) SetName(name *string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListOrgsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/list_orgs_responses.go b/client/organizations/list_orgs_responses.go
new file mode 100644
index 00000000..c72f11cb
--- /dev/null
+++ b/client/organizations/list_orgs_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListOrgsReader is a Reader for the ListOrgs structure.
+type ListOrgsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListOrgsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListOrgsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListOrgsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListOrgsOK creates a ListOrgsOK with default headers values
+func NewListOrgsOK() *ListOrgsOK {
+ return &ListOrgsOK{}
+}
+
+/*
+ListOrgsOK describes a response with status code 200, with default header values.
+
+Organizations
+*/
+type ListOrgsOK struct {
+ Payload garm_params.Organizations
+}
+
+// IsSuccess returns true when this list orgs o k response has a 2xx status code
+func (o *ListOrgsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list orgs o k response has a 3xx status code
+func (o *ListOrgsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list orgs o k response has a 4xx status code
+func (o *ListOrgsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list orgs o k response has a 5xx status code
+func (o *ListOrgsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list orgs o k response has a status code equal to that given
+func (o *ListOrgsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list orgs o k response
+func (o *ListOrgsOK) Code() int {
+ return 200
+}
+
+func (o *ListOrgsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %s", 200, payload)
+}
+
+func (o *ListOrgsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] listOrgsOK %s", 200, payload)
+}
+
+func (o *ListOrgsOK) GetPayload() garm_params.Organizations {
+ return o.Payload
+}
+
+func (o *ListOrgsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListOrgsDefault creates a ListOrgsDefault with default headers values
+func NewListOrgsDefault(code int) *ListOrgsDefault {
+ return &ListOrgsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListOrgsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListOrgsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list orgs default response has a 2xx status code
+func (o *ListOrgsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list orgs default response has a 3xx status code
+func (o *ListOrgsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list orgs default response has a 4xx status code
+func (o *ListOrgsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list orgs default response has a 5xx status code
+func (o *ListOrgsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list orgs default response has a status code equal to that given
+func (o *ListOrgsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list orgs default response
+func (o *ListOrgsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListOrgsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /organizations][%d] ListOrgs default %s", o._statusCode, payload)
+}
+
+func (o *ListOrgsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListOrgsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/organizations_client.go b/client/organizations/organizations_client.go
new file mode 100644
index 00000000..cd3e1211
--- /dev/null
+++ b/client/organizations/organizations_client.go
@@ -0,0 +1,687 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new organizations API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new organizations API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new organizations API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for organizations API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateOrg(params *CreateOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgOK, error)
+
+ CreateOrgPool(params *CreateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgPoolOK, error)
+
+ CreateOrgScaleSet(params *CreateOrgScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgScaleSetOK, error)
+
+ DeleteOrg(params *DeleteOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteOrgPool(params *DeleteOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetOrg(params *GetOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgOK, error)
+
+ GetOrgPool(params *GetOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgPoolOK, error)
+
+ GetOrgWebhookInfo(params *GetOrgWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgWebhookInfoOK, error)
+
+ InstallOrgWebhook(params *InstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallOrgWebhookOK, error)
+
+ ListOrgInstances(params *ListOrgInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgInstancesOK, error)
+
+ ListOrgPools(params *ListOrgPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgPoolsOK, error)
+
+ ListOrgScaleSets(params *ListOrgScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgScaleSetsOK, error)
+
+ ListOrgs(params *ListOrgsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgsOK, error)
+
+ UninstallOrgWebhook(params *UninstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ UpdateOrg(params *UpdateOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgOK, error)
+
+ UpdateOrgPool(params *UpdateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgPoolOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateOrg creates organization with the parameters given
+*/
+func (a *Client) CreateOrg(params *CreateOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateOrgParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateOrg",
+ Method: "POST",
+ PathPattern: "/organizations",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateOrgReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateOrgOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateOrgDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateOrgPool creates organization pool with the parameters given
+*/
+func (a *Client) CreateOrgPool(params *CreateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateOrgPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateOrgPool",
+ Method: "POST",
+ PathPattern: "/organizations/{orgID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateOrgPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateOrgPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateOrgPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateOrgScaleSet creates organization scale set with the parameters given
+*/
+func (a *Client) CreateOrgScaleSet(params *CreateOrgScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateOrgScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateOrgScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateOrgScaleSet",
+ Method: "POST",
+ PathPattern: "/organizations/{orgID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateOrgScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateOrgScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateOrgScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+DeleteOrg deletes organization by ID
+*/
+func (a *Client) DeleteOrg(params *DeleteOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteOrgParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteOrg",
+ Method: "DELETE",
+ PathPattern: "/organizations/{orgID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteOrgReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteOrgPool deletes organization pool by ID
+*/
+func (a *Client) DeleteOrgPool(params *DeleteOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteOrgPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteOrgPool",
+ Method: "DELETE",
+ PathPattern: "/organizations/{orgID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteOrgPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetOrg gets organization by ID
+*/
+func (a *Client) GetOrg(params *GetOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetOrgParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetOrg",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetOrgReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetOrgOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetOrgDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetOrgPool gets organization pool by ID
+*/
+func (a *Client) GetOrgPool(params *GetOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetOrgPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetOrgPool",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetOrgPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetOrgPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetOrgPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetOrgWebhookInfo gets information about the g a r m installed webhook on an organization
+*/
+func (a *Client) GetOrgWebhookInfo(params *GetOrgWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetOrgWebhookInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetOrgWebhookInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetOrgWebhookInfo",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetOrgWebhookInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetOrgWebhookInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetOrgWebhookInfoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ InstallOrgWebhook Install the GARM webhook for an organization. The secret configured on the organization will
+
+be used to validate the requests.
+*/
+func (a *Client) InstallOrgWebhook(params *InstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallOrgWebhookOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewInstallOrgWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "InstallOrgWebhook",
+ Method: "POST",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &InstallOrgWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*InstallOrgWebhookOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*InstallOrgWebhookDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListOrgInstances lists organization instances
+*/
+func (a *Client) ListOrgInstances(params *ListOrgInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListOrgInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListOrgInstances",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListOrgInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListOrgInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListOrgInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListOrgPools lists organization pools
+*/
+func (a *Client) ListOrgPools(params *ListOrgPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgPoolsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListOrgPoolsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListOrgPools",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListOrgPoolsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListOrgPoolsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListOrgPoolsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListOrgScaleSets lists organization scale sets
+*/
+func (a *Client) ListOrgScaleSets(params *ListOrgScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListOrgScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListOrgScaleSets",
+ Method: "GET",
+ PathPattern: "/organizations/{orgID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListOrgScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListOrgScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListOrgScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListOrgs lists organizations
+*/
+func (a *Client) ListOrgs(params *ListOrgsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListOrgsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListOrgsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListOrgs",
+ Method: "GET",
+ PathPattern: "/organizations",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListOrgsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListOrgsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListOrgsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UninstallOrgWebhook uninstalls organization webhook
+*/
+func (a *Client) UninstallOrgWebhook(params *UninstallOrgWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUninstallOrgWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UninstallOrgWebhook",
+ Method: "DELETE",
+ PathPattern: "/organizations/{orgID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UninstallOrgWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+UpdateOrg updates organization with the parameters given
+*/
+func (a *Client) UpdateOrg(params *UpdateOrgParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateOrgParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateOrg",
+ Method: "PUT",
+ PathPattern: "/organizations/{orgID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateOrgReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateOrgOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateOrgDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateOrgPool updates organization pool with the parameters given
+*/
+func (a *Client) UpdateOrgPool(params *UpdateOrgPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateOrgPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateOrgPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateOrgPool",
+ Method: "PUT",
+ PathPattern: "/organizations/{orgID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateOrgPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateOrgPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateOrgPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/organizations/uninstall_org_webhook_parameters.go b/client/organizations/uninstall_org_webhook_parameters.go
new file mode 100644
index 00000000..3914d9c7
--- /dev/null
+++ b/client/organizations/uninstall_org_webhook_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewUninstallOrgWebhookParams creates a new UninstallOrgWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUninstallOrgWebhookParams() *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithTimeout creates a new UninstallOrgWebhookParams object
+// with the ability to set a timeout on a request.
+func NewUninstallOrgWebhookParamsWithTimeout(timeout time.Duration) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithContext creates a new UninstallOrgWebhookParams object
+// with the ability to set a context for a request.
+func NewUninstallOrgWebhookParamsWithContext(ctx context.Context) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewUninstallOrgWebhookParamsWithHTTPClient creates a new UninstallOrgWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUninstallOrgWebhookParamsWithHTTPClient(client *http.Client) *UninstallOrgWebhookParams {
+ return &UninstallOrgWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UninstallOrgWebhookParams contains all the parameters to send to the API endpoint
+
+ for the uninstall org webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type UninstallOrgWebhookParams struct {
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the uninstall org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallOrgWebhookParams) WithDefaults() *UninstallOrgWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the uninstall org webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallOrgWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithTimeout(timeout time.Duration) *UninstallOrgWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithContext(ctx context.Context) *UninstallOrgWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithHTTPClient(client *http.Client) *UninstallOrgWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithOrgID adds the orgID to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) WithOrgID(orgID string) *UninstallOrgWebhookParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the uninstall org webhook params
+func (o *UninstallOrgWebhookParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UninstallOrgWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/uninstall_org_webhook_responses.go b/client/organizations/uninstall_org_webhook_responses.go
new file mode 100644
index 00000000..6f1bceac
--- /dev/null
+++ b/client/organizations/uninstall_org_webhook_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// UninstallOrgWebhookReader is a Reader for the UninstallOrgWebhook structure.
+type UninstallOrgWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UninstallOrgWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewUninstallOrgWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewUninstallOrgWebhookDefault creates a UninstallOrgWebhookDefault with default headers values
+func NewUninstallOrgWebhookDefault(code int) *UninstallOrgWebhookDefault {
+ return &UninstallOrgWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UninstallOrgWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UninstallOrgWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this uninstall org webhook default response has a 2xx status code
+func (o *UninstallOrgWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this uninstall org webhook default response has a 3xx status code
+func (o *UninstallOrgWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this uninstall org webhook default response has a 4xx status code
+func (o *UninstallOrgWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this uninstall org webhook default response has a 5xx status code
+func (o *UninstallOrgWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this uninstall org webhook default response has a status code equal to that given
+func (o *UninstallOrgWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the uninstall org webhook default response
+func (o *UninstallOrgWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UninstallOrgWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/webhook][%d] UninstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallOrgWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /organizations/{orgID}/webhook][%d] UninstallOrgWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallOrgWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UninstallOrgWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/update_org_parameters.go b/client/organizations/update_org_parameters.go
new file mode 100644
index 00000000..e2e32517
--- /dev/null
+++ b/client/organizations/update_org_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateOrgParams creates a new UpdateOrgParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateOrgParams() *UpdateOrgParams {
+ return &UpdateOrgParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateOrgParamsWithTimeout creates a new UpdateOrgParams object
+// with the ability to set a timeout on a request.
+func NewUpdateOrgParamsWithTimeout(timeout time.Duration) *UpdateOrgParams {
+ return &UpdateOrgParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateOrgParamsWithContext creates a new UpdateOrgParams object
+// with the ability to set a context for a request.
+func NewUpdateOrgParamsWithContext(ctx context.Context) *UpdateOrgParams {
+ return &UpdateOrgParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateOrgParamsWithHTTPClient creates a new UpdateOrgParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateOrgParamsWithHTTPClient(client *http.Client) *UpdateOrgParams {
+ return &UpdateOrgParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateOrgParams contains all the parameters to send to the API endpoint
+
+ for the update org operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateOrgParams struct {
+
+ /* Body.
+
+ Parameters used when updating the organization.
+ */
+ Body garm_params.UpdateEntityParams
+
+ /* OrgID.
+
+ ID of the organization to update.
+ */
+ OrgID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateOrgParams) WithDefaults() *UpdateOrgParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update org params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateOrgParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update org params
+func (o *UpdateOrgParams) WithTimeout(timeout time.Duration) *UpdateOrgParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update org params
+func (o *UpdateOrgParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update org params
+func (o *UpdateOrgParams) WithContext(ctx context.Context) *UpdateOrgParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update org params
+func (o *UpdateOrgParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update org params
+func (o *UpdateOrgParams) WithHTTPClient(client *http.Client) *UpdateOrgParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update org params
+func (o *UpdateOrgParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update org params
+func (o *UpdateOrgParams) WithBody(body garm_params.UpdateEntityParams) *UpdateOrgParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update org params
+func (o *UpdateOrgParams) SetBody(body garm_params.UpdateEntityParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the update org params
+func (o *UpdateOrgParams) WithOrgID(orgID string) *UpdateOrgParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the update org params
+func (o *UpdateOrgParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateOrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/update_org_pool_parameters.go b/client/organizations/update_org_pool_parameters.go
new file mode 100644
index 00000000..089441e4
--- /dev/null
+++ b/client/organizations/update_org_pool_parameters.go
@@ -0,0 +1,195 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateOrgPoolParams creates a new UpdateOrgPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateOrgPoolParams() *UpdateOrgPoolParams {
+ return &UpdateOrgPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateOrgPoolParamsWithTimeout creates a new UpdateOrgPoolParams object
+// with the ability to set a timeout on a request.
+func NewUpdateOrgPoolParamsWithTimeout(timeout time.Duration) *UpdateOrgPoolParams {
+ return &UpdateOrgPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateOrgPoolParamsWithContext creates a new UpdateOrgPoolParams object
+// with the ability to set a context for a request.
+func NewUpdateOrgPoolParamsWithContext(ctx context.Context) *UpdateOrgPoolParams {
+ return &UpdateOrgPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateOrgPoolParamsWithHTTPClient creates a new UpdateOrgPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateOrgPoolParamsWithHTTPClient(client *http.Client) *UpdateOrgPoolParams {
+ return &UpdateOrgPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateOrgPoolParams contains all the parameters to send to the API endpoint
+
+ for the update org pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateOrgPoolParams struct {
+
+ /* Body.
+
+ Parameters used when updating the organization pool.
+ */
+ Body garm_params.UpdatePoolParams
+
+ /* OrgID.
+
+ Organization ID.
+ */
+ OrgID string
+
+ /* PoolID.
+
+ ID of the organization pool to update.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateOrgPoolParams) WithDefaults() *UpdateOrgPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update org pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateOrgPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update org pool params
+func (o *UpdateOrgPoolParams) WithTimeout(timeout time.Duration) *UpdateOrgPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update org pool params
+func (o *UpdateOrgPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update org pool params
+func (o *UpdateOrgPoolParams) WithContext(ctx context.Context) *UpdateOrgPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update org pool params
+func (o *UpdateOrgPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update org pool params
+func (o *UpdateOrgPoolParams) WithHTTPClient(client *http.Client) *UpdateOrgPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update org pool params
+func (o *UpdateOrgPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update org pool params
+func (o *UpdateOrgPoolParams) WithBody(body garm_params.UpdatePoolParams) *UpdateOrgPoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update org pool params
+func (o *UpdateOrgPoolParams) SetBody(body garm_params.UpdatePoolParams) {
+ o.Body = body
+}
+
+// WithOrgID adds the orgID to the update org pool params
+func (o *UpdateOrgPoolParams) WithOrgID(orgID string) *UpdateOrgPoolParams {
+ o.SetOrgID(orgID)
+ return o
+}
+
+// SetOrgID adds the orgId to the update org pool params
+func (o *UpdateOrgPoolParams) SetOrgID(orgID string) {
+ o.OrgID = orgID
+}
+
+// WithPoolID adds the poolID to the update org pool params
+func (o *UpdateOrgPoolParams) WithPoolID(poolID string) *UpdateOrgPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the update org pool params
+func (o *UpdateOrgPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateOrgPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param orgID
+ if err := r.SetPathParam("orgID", o.OrgID); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/organizations/update_org_pool_responses.go b/client/organizations/update_org_pool_responses.go
new file mode 100644
index 00000000..cad49146
--- /dev/null
+++ b/client/organizations/update_org_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateOrgPoolReader is a Reader for the UpdateOrgPool structure.
+type UpdateOrgPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateOrgPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateOrgPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateOrgPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateOrgPoolOK creates a UpdateOrgPoolOK with default headers values
+func NewUpdateOrgPoolOK() *UpdateOrgPoolOK {
+ return &UpdateOrgPoolOK{}
+}
+
+/*
+UpdateOrgPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type UpdateOrgPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this update org pool o k response has a 2xx status code
+func (o *UpdateOrgPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update org pool o k response has a 3xx status code
+func (o *UpdateOrgPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update org pool o k response has a 4xx status code
+func (o *UpdateOrgPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update org pool o k response has a 5xx status code
+func (o *UpdateOrgPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update org pool o k response has a status code equal to that given
+func (o *UpdateOrgPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update org pool o k response
+func (o *UpdateOrgPoolOK) Code() int {
+ return 200
+}
+
+func (o *UpdateOrgPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %s", 200, payload)
+}
+
+func (o *UpdateOrgPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] updateOrgPoolOK %s", 200, payload)
+}
+
+func (o *UpdateOrgPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *UpdateOrgPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateOrgPoolDefault creates a UpdateOrgPoolDefault with default headers values
+func NewUpdateOrgPoolDefault(code int) *UpdateOrgPoolDefault {
+ return &UpdateOrgPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateOrgPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateOrgPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update org pool default response has a 2xx status code
+func (o *UpdateOrgPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update org pool default response has a 3xx status code
+func (o *UpdateOrgPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update org pool default response has a 4xx status code
+func (o *UpdateOrgPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update org pool default response has a 5xx status code
+func (o *UpdateOrgPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update org pool default response has a status code equal to that given
+func (o *UpdateOrgPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update org pool default response
+func (o *UpdateOrgPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateOrgPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateOrgPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}/pools/{poolID}][%d] UpdateOrgPool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateOrgPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateOrgPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/organizations/update_org_responses.go b/client/organizations/update_org_responses.go
new file mode 100644
index 00000000..d6483c54
--- /dev/null
+++ b/client/organizations/update_org_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package organizations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateOrgReader is a Reader for the UpdateOrg structure.
+type UpdateOrgReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateOrgReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateOrgOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateOrgDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateOrgOK creates a UpdateOrgOK with default headers values
+func NewUpdateOrgOK() *UpdateOrgOK {
+ return &UpdateOrgOK{}
+}
+
+/*
+UpdateOrgOK describes a response with status code 200, with default header values.
+
+Organization
+*/
+type UpdateOrgOK struct {
+ Payload garm_params.Organization
+}
+
+// IsSuccess returns true when this update org o k response has a 2xx status code
+func (o *UpdateOrgOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update org o k response has a 3xx status code
+func (o *UpdateOrgOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update org o k response has a 4xx status code
+func (o *UpdateOrgOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update org o k response has a 5xx status code
+func (o *UpdateOrgOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update org o k response has a status code equal to that given
+func (o *UpdateOrgOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update org o k response
+func (o *UpdateOrgOK) Code() int {
+ return 200
+}
+
+func (o *UpdateOrgOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %s", 200, payload)
+}
+
+func (o *UpdateOrgOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] updateOrgOK %s", 200, payload)
+}
+
+func (o *UpdateOrgOK) GetPayload() garm_params.Organization {
+ return o.Payload
+}
+
+func (o *UpdateOrgOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateOrgDefault creates a UpdateOrgDefault with default headers values
+func NewUpdateOrgDefault(code int) *UpdateOrgDefault {
+ return &UpdateOrgDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateOrgDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateOrgDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update org default response has a 2xx status code
+func (o *UpdateOrgDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update org default response has a 3xx status code
+func (o *UpdateOrgDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update org default response has a 4xx status code
+func (o *UpdateOrgDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update org default response has a 5xx status code
+func (o *UpdateOrgDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update org default response has a status code equal to that given
+func (o *UpdateOrgDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update org default response
+func (o *UpdateOrgDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateOrgDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %s", o._statusCode, payload)
+}
+
+func (o *UpdateOrgDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /organizations/{orgID}][%d] UpdateOrg default %s", o._statusCode, payload)
+}
+
+func (o *UpdateOrgDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateOrgDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/pools/delete_pool_parameters.go b/client/pools/delete_pool_parameters.go
new file mode 100644
index 00000000..4524703d
--- /dev/null
+++ b/client/pools/delete_pool_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeletePoolParams creates a new DeletePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeletePoolParams() *DeletePoolParams {
+ return &DeletePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeletePoolParamsWithTimeout creates a new DeletePoolParams object
+// with the ability to set a timeout on a request.
+func NewDeletePoolParamsWithTimeout(timeout time.Duration) *DeletePoolParams {
+ return &DeletePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeletePoolParamsWithContext creates a new DeletePoolParams object
+// with the ability to set a context for a request.
+func NewDeletePoolParamsWithContext(ctx context.Context) *DeletePoolParams {
+ return &DeletePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewDeletePoolParamsWithHTTPClient creates a new DeletePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeletePoolParamsWithHTTPClient(client *http.Client) *DeletePoolParams {
+ return &DeletePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeletePoolParams contains all the parameters to send to the API endpoint
+
+ for the delete pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeletePoolParams struct {
+
+ /* PoolID.
+
+ ID of the pool to delete.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePoolParams) WithDefaults() *DeletePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete pool params
+func (o *DeletePoolParams) WithTimeout(timeout time.Duration) *DeletePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete pool params
+func (o *DeletePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete pool params
+func (o *DeletePoolParams) WithContext(ctx context.Context) *DeletePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete pool params
+func (o *DeletePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete pool params
+func (o *DeletePoolParams) WithHTTPClient(client *http.Client) *DeletePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete pool params
+func (o *DeletePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPoolID adds the poolID to the delete pool params
+func (o *DeletePoolParams) WithPoolID(poolID string) *DeletePoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the delete pool params
+func (o *DeletePoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeletePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/pools/delete_pool_responses.go b/client/pools/delete_pool_responses.go
new file mode 100644
index 00000000..18a3aee3
--- /dev/null
+++ b/client/pools/delete_pool_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeletePoolReader is a Reader for the DeletePool structure.
+type DeletePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeletePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeletePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeletePoolDefault creates a DeletePoolDefault with default headers values
+func NewDeletePoolDefault(code int) *DeletePoolDefault {
+ return &DeletePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeletePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeletePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete pool default response has a 2xx status code
+func (o *DeletePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete pool default response has a 3xx status code
+func (o *DeletePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete pool default response has a 4xx status code
+func (o *DeletePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete pool default response has a 5xx status code
+func (o *DeletePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete pool default response has a status code equal to that given
+func (o *DeletePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete pool default response
+func (o *DeletePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeletePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %s", o._statusCode, payload)
+}
+
+func (o *DeletePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /pools/{poolID}][%d] DeletePool default %s", o._statusCode, payload)
+}
+
+func (o *DeletePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeletePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/pools/get_pool_parameters.go b/client/pools/get_pool_parameters.go
new file mode 100644
index 00000000..c4871e02
--- /dev/null
+++ b/client/pools/get_pool_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetPoolParams creates a new GetPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetPoolParams() *GetPoolParams {
+ return &GetPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetPoolParamsWithTimeout creates a new GetPoolParams object
+// with the ability to set a timeout on a request.
+func NewGetPoolParamsWithTimeout(timeout time.Duration) *GetPoolParams {
+ return &GetPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetPoolParamsWithContext creates a new GetPoolParams object
+// with the ability to set a context for a request.
+func NewGetPoolParamsWithContext(ctx context.Context) *GetPoolParams {
+ return &GetPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewGetPoolParamsWithHTTPClient creates a new GetPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetPoolParamsWithHTTPClient(client *http.Client) *GetPoolParams {
+ return &GetPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetPoolParams contains all the parameters to send to the API endpoint
+
+ for the get pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetPoolParams struct {
+
+ /* PoolID.
+
+ ID of the pool to fetch.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPoolParams) WithDefaults() *GetPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get pool params
+func (o *GetPoolParams) WithTimeout(timeout time.Duration) *GetPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get pool params
+func (o *GetPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get pool params
+func (o *GetPoolParams) WithContext(ctx context.Context) *GetPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get pool params
+func (o *GetPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get pool params
+func (o *GetPoolParams) WithHTTPClient(client *http.Client) *GetPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get pool params
+func (o *GetPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPoolID adds the poolID to the get pool params
+func (o *GetPoolParams) WithPoolID(poolID string) *GetPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the get pool params
+func (o *GetPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/pools/get_pool_responses.go b/client/pools/get_pool_responses.go
new file mode 100644
index 00000000..8638dd37
--- /dev/null
+++ b/client/pools/get_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetPoolReader is a Reader for the GetPool structure.
+type GetPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetPoolOK creates a GetPoolOK with default headers values
+func NewGetPoolOK() *GetPoolOK {
+ return &GetPoolOK{}
+}
+
+/*
+GetPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type GetPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this get pool o k response has a 2xx status code
+func (o *GetPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get pool o k response has a 3xx status code
+func (o *GetPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get pool o k response has a 4xx status code
+func (o *GetPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get pool o k response has a 5xx status code
+func (o *GetPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get pool o k response has a status code equal to that given
+func (o *GetPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get pool o k response
+func (o *GetPoolOK) Code() int {
+ return 200
+}
+
+func (o *GetPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %s", 200, payload)
+}
+
+func (o *GetPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] getPoolOK %s", 200, payload)
+}
+
+func (o *GetPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *GetPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetPoolDefault creates a GetPoolDefault with default headers values
+func NewGetPoolDefault(code int) *GetPoolDefault {
+ return &GetPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get pool default response has a 2xx status code
+func (o *GetPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get pool default response has a 3xx status code
+func (o *GetPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get pool default response has a 4xx status code
+func (o *GetPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get pool default response has a 5xx status code
+func (o *GetPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get pool default response has a status code equal to that given
+func (o *GetPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get pool default response
+func (o *GetPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %s", o._statusCode, payload)
+}
+
+func (o *GetPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools/{poolID}][%d] GetPool default %s", o._statusCode, payload)
+}
+
+func (o *GetPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/pools/list_pools_parameters.go b/client/pools/list_pools_parameters.go
new file mode 100644
index 00000000..361dec69
--- /dev/null
+++ b/client/pools/list_pools_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListPoolsParams creates a new ListPoolsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListPoolsParams() *ListPoolsParams {
+ return &ListPoolsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListPoolsParamsWithTimeout creates a new ListPoolsParams object
+// with the ability to set a timeout on a request.
+func NewListPoolsParamsWithTimeout(timeout time.Duration) *ListPoolsParams {
+ return &ListPoolsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListPoolsParamsWithContext creates a new ListPoolsParams object
+// with the ability to set a context for a request.
+func NewListPoolsParamsWithContext(ctx context.Context) *ListPoolsParams {
+ return &ListPoolsParams{
+ Context: ctx,
+ }
+}
+
+// NewListPoolsParamsWithHTTPClient creates a new ListPoolsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListPoolsParamsWithHTTPClient(client *http.Client) *ListPoolsParams {
+ return &ListPoolsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListPoolsParams contains all the parameters to send to the API endpoint
+
+ for the list pools operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListPoolsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListPoolsParams) WithDefaults() *ListPoolsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListPoolsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list pools params
+func (o *ListPoolsParams) WithTimeout(timeout time.Duration) *ListPoolsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list pools params
+func (o *ListPoolsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list pools params
+func (o *ListPoolsParams) WithContext(ctx context.Context) *ListPoolsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list pools params
+func (o *ListPoolsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list pools params
+func (o *ListPoolsParams) WithHTTPClient(client *http.Client) *ListPoolsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list pools params
+func (o *ListPoolsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListPoolsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/pools/list_pools_responses.go b/client/pools/list_pools_responses.go
new file mode 100644
index 00000000..9cc36c8f
--- /dev/null
+++ b/client/pools/list_pools_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListPoolsReader is a Reader for the ListPools structure.
+type ListPoolsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListPoolsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListPoolsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListPoolsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListPoolsOK creates a ListPoolsOK with default headers values
+func NewListPoolsOK() *ListPoolsOK {
+ return &ListPoolsOK{}
+}
+
+/*
+ListPoolsOK describes a response with status code 200, with default header values.
+
+Pools
+*/
+type ListPoolsOK struct {
+ Payload garm_params.Pools
+}
+
+// IsSuccess returns true when this list pools o k response has a 2xx status code
+func (o *ListPoolsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list pools o k response has a 3xx status code
+func (o *ListPoolsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list pools o k response has a 4xx status code
+func (o *ListPoolsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list pools o k response has a 5xx status code
+func (o *ListPoolsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list pools o k response has a status code equal to that given
+func (o *ListPoolsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list pools o k response
+func (o *ListPoolsOK) Code() int {
+ return 200
+}
+
+func (o *ListPoolsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] listPoolsOK %s", 200, payload)
+}
+
+func (o *ListPoolsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] listPoolsOK %s", 200, payload)
+}
+
+func (o *ListPoolsOK) GetPayload() garm_params.Pools {
+ return o.Payload
+}
+
+func (o *ListPoolsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListPoolsDefault creates a ListPoolsDefault with default headers values
+func NewListPoolsDefault(code int) *ListPoolsDefault {
+ return &ListPoolsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListPoolsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListPoolsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list pools default response has a 2xx status code
+func (o *ListPoolsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list pools default response has a 3xx status code
+func (o *ListPoolsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list pools default response has a 4xx status code
+func (o *ListPoolsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list pools default response has a 5xx status code
+func (o *ListPoolsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list pools default response has a status code equal to that given
+func (o *ListPoolsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list pools default response
+func (o *ListPoolsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListPoolsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] ListPools default %s", o._statusCode, payload)
+}
+
+func (o *ListPoolsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /pools][%d] ListPools default %s", o._statusCode, payload)
+}
+
+func (o *ListPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListPoolsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/pools/pools_client.go b/client/pools/pools_client.go
new file mode 100644
index 00000000..604a2e46
--- /dev/null
+++ b/client/pools/pools_client.go
@@ -0,0 +1,217 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new pools API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new pools API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new pools API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for pools API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeletePool(params *DeletePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetPool(params *GetPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetPoolOK, error)
+
+ ListPools(params *ListPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListPoolsOK, error)
+
+ UpdatePool(params *UpdatePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdatePoolOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeletePool deletes pool by ID
+*/
+func (a *Client) DeletePool(params *DeletePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeletePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeletePool",
+ Method: "DELETE",
+ PathPattern: "/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeletePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetPool gets pool by ID
+*/
+func (a *Client) GetPool(params *GetPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetPool",
+ Method: "GET",
+ PathPattern: "/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListPools lists all pools
+*/
+func (a *Client) ListPools(params *ListPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListPoolsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListPoolsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListPools",
+ Method: "GET",
+ PathPattern: "/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListPoolsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListPoolsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListPoolsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdatePool updates pool by ID
+*/
+func (a *Client) UpdatePool(params *UpdatePoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdatePoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdatePoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdatePool",
+ Method: "PUT",
+ PathPattern: "/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdatePoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdatePoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdatePoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/pools/update_pool_parameters.go b/client/pools/update_pool_parameters.go
new file mode 100644
index 00000000..75bf5c96
--- /dev/null
+++ b/client/pools/update_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdatePoolParams creates a new UpdatePoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdatePoolParams() *UpdatePoolParams {
+ return &UpdatePoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdatePoolParamsWithTimeout creates a new UpdatePoolParams object
+// with the ability to set a timeout on a request.
+func NewUpdatePoolParamsWithTimeout(timeout time.Duration) *UpdatePoolParams {
+ return &UpdatePoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdatePoolParamsWithContext creates a new UpdatePoolParams object
+// with the ability to set a context for a request.
+func NewUpdatePoolParamsWithContext(ctx context.Context) *UpdatePoolParams {
+ return &UpdatePoolParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdatePoolParamsWithHTTPClient creates a new UpdatePoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdatePoolParamsWithHTTPClient(client *http.Client) *UpdatePoolParams {
+ return &UpdatePoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdatePoolParams contains all the parameters to send to the API endpoint
+
+ for the update pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdatePoolParams struct {
+
+ /* Body.
+
+ Parameters to update the pool with.
+ */
+ Body garm_params.UpdatePoolParams
+
+ /* PoolID.
+
+ ID of the pool to update.
+ */
+ PoolID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdatePoolParams) WithDefaults() *UpdatePoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdatePoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update pool params
+func (o *UpdatePoolParams) WithTimeout(timeout time.Duration) *UpdatePoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update pool params
+func (o *UpdatePoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update pool params
+func (o *UpdatePoolParams) WithContext(ctx context.Context) *UpdatePoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update pool params
+func (o *UpdatePoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update pool params
+func (o *UpdatePoolParams) WithHTTPClient(client *http.Client) *UpdatePoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update pool params
+func (o *UpdatePoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update pool params
+func (o *UpdatePoolParams) WithBody(body garm_params.UpdatePoolParams) *UpdatePoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update pool params
+func (o *UpdatePoolParams) SetBody(body garm_params.UpdatePoolParams) {
+ o.Body = body
+}
+
+// WithPoolID adds the poolID to the update pool params
+func (o *UpdatePoolParams) WithPoolID(poolID string) *UpdatePoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the update pool params
+func (o *UpdatePoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdatePoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/pools/update_pool_responses.go b/client/pools/update_pool_responses.go
new file mode 100644
index 00000000..baf5a2bf
--- /dev/null
+++ b/client/pools/update_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package pools
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdatePoolReader is a Reader for the UpdatePool structure.
+type UpdatePoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdatePoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdatePoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdatePoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdatePoolOK creates a UpdatePoolOK with default headers values
+func NewUpdatePoolOK() *UpdatePoolOK {
+ return &UpdatePoolOK{}
+}
+
+/*
+UpdatePoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type UpdatePoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this update pool o k response has a 2xx status code
+func (o *UpdatePoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update pool o k response has a 3xx status code
+func (o *UpdatePoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update pool o k response has a 4xx status code
+func (o *UpdatePoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update pool o k response has a 5xx status code
+func (o *UpdatePoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update pool o k response has a status code equal to that given
+func (o *UpdatePoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update pool o k response
+func (o *UpdatePoolOK) Code() int {
+ return 200
+}
+
+func (o *UpdatePoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %s", 200, payload)
+}
+
+func (o *UpdatePoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] updatePoolOK %s", 200, payload)
+}
+
+func (o *UpdatePoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *UpdatePoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdatePoolDefault creates a UpdatePoolDefault with default headers values
+func NewUpdatePoolDefault(code int) *UpdatePoolDefault {
+ return &UpdatePoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdatePoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdatePoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update pool default response has a 2xx status code
+func (o *UpdatePoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update pool default response has a 3xx status code
+func (o *UpdatePoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update pool default response has a 4xx status code
+func (o *UpdatePoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update pool default response has a 5xx status code
+func (o *UpdatePoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update pool default response has a status code equal to that given
+func (o *UpdatePoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update pool default response
+func (o *UpdatePoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdatePoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %s", o._statusCode, payload)
+}
+
+func (o *UpdatePoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /pools/{poolID}][%d] UpdatePool default %s", o._statusCode, payload)
+}
+
+func (o *UpdatePoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdatePoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/providers/list_providers_parameters.go b/client/providers/list_providers_parameters.go
new file mode 100644
index 00000000..e411b1aa
--- /dev/null
+++ b/client/providers/list_providers_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package providers
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListProvidersParams creates a new ListProvidersParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListProvidersParams() *ListProvidersParams {
+ return &ListProvidersParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListProvidersParamsWithTimeout creates a new ListProvidersParams object
+// with the ability to set a timeout on a request.
+func NewListProvidersParamsWithTimeout(timeout time.Duration) *ListProvidersParams {
+ return &ListProvidersParams{
+ timeout: timeout,
+ }
+}
+
+// NewListProvidersParamsWithContext creates a new ListProvidersParams object
+// with the ability to set a context for a request.
+func NewListProvidersParamsWithContext(ctx context.Context) *ListProvidersParams {
+ return &ListProvidersParams{
+ Context: ctx,
+ }
+}
+
+// NewListProvidersParamsWithHTTPClient creates a new ListProvidersParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListProvidersParamsWithHTTPClient(client *http.Client) *ListProvidersParams {
+ return &ListProvidersParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListProvidersParams contains all the parameters to send to the API endpoint
+
+ for the list providers operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListProvidersParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list providers params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListProvidersParams) WithDefaults() *ListProvidersParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list providers params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListProvidersParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list providers params
+func (o *ListProvidersParams) WithTimeout(timeout time.Duration) *ListProvidersParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list providers params
+func (o *ListProvidersParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list providers params
+func (o *ListProvidersParams) WithContext(ctx context.Context) *ListProvidersParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list providers params
+func (o *ListProvidersParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list providers params
+func (o *ListProvidersParams) WithHTTPClient(client *http.Client) *ListProvidersParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list providers params
+func (o *ListProvidersParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListProvidersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/providers/list_providers_responses.go b/client/providers/list_providers_responses.go
new file mode 100644
index 00000000..14a042c9
--- /dev/null
+++ b/client/providers/list_providers_responses.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package providers
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListProvidersReader is a Reader for the ListProviders structure.
+type ListProvidersReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListProvidersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListProvidersOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewListProvidersBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("[GET /providers] ListProviders", response, response.Code())
+ }
+}
+
+// NewListProvidersOK creates a ListProvidersOK with default headers values
+func NewListProvidersOK() *ListProvidersOK {
+ return &ListProvidersOK{}
+}
+
+/*
+ListProvidersOK describes a response with status code 200, with default header values.
+
+Providers
+*/
+type ListProvidersOK struct {
+ Payload garm_params.Providers
+}
+
+// IsSuccess returns true when this list providers o k response has a 2xx status code
+func (o *ListProvidersOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list providers o k response has a 3xx status code
+func (o *ListProvidersOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list providers o k response has a 4xx status code
+func (o *ListProvidersOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list providers o k response has a 5xx status code
+func (o *ListProvidersOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list providers o k response has a status code equal to that given
+func (o *ListProvidersOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list providers o k response
+func (o *ListProvidersOK) Code() int {
+ return 200
+}
+
+func (o *ListProvidersOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersOK %s", 200, payload)
+}
+
+func (o *ListProvidersOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersOK %s", 200, payload)
+}
+
+func (o *ListProvidersOK) GetPayload() garm_params.Providers {
+ return o.Payload
+}
+
+func (o *ListProvidersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListProvidersBadRequest creates a ListProvidersBadRequest with default headers values
+func NewListProvidersBadRequest() *ListProvidersBadRequest {
+ return &ListProvidersBadRequest{}
+}
+
+/*
+ListProvidersBadRequest describes a response with status code 400, with default header values.
+
+APIErrorResponse
+*/
+type ListProvidersBadRequest struct {
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list providers bad request response has a 2xx status code
+func (o *ListProvidersBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this list providers bad request response has a 3xx status code
+func (o *ListProvidersBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list providers bad request response has a 4xx status code
+func (o *ListProvidersBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this list providers bad request response has a 5xx status code
+func (o *ListProvidersBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list providers bad request response has a status code equal to that given
+func (o *ListProvidersBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+// Code gets the status code for the list providers bad request response
+func (o *ListProvidersBadRequest) Code() int {
+ return 400
+}
+
+func (o *ListProvidersBadRequest) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %s", 400, payload)
+}
+
+func (o *ListProvidersBadRequest) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /providers][%d] listProvidersBadRequest %s", 400, payload)
+}
+
+func (o *ListProvidersBadRequest) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListProvidersBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/providers/providers_client.go b/client/providers/providers_client.go
new file mode 100644
index 00000000..ab2600e8
--- /dev/null
+++ b/client/providers/providers_client.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package providers
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new providers API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new providers API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new providers API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for providers API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ ListProviders(params *ListProvidersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListProvidersOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ListProviders lists all providers
+*/
+func (a *Client) ListProviders(params *ListProvidersParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListProvidersOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListProvidersParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListProviders",
+ Method: "GET",
+ PathPattern: "/providers",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListProvidersReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListProvidersOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for ListProviders: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/repositories/create_repo_parameters.go b/client/repositories/create_repo_parameters.go
new file mode 100644
index 00000000..56b22e09
--- /dev/null
+++ b/client/repositories/create_repo_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateRepoParams creates a new CreateRepoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateRepoParams() *CreateRepoParams {
+ return &CreateRepoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateRepoParamsWithTimeout creates a new CreateRepoParams object
+// with the ability to set a timeout on a request.
+func NewCreateRepoParamsWithTimeout(timeout time.Duration) *CreateRepoParams {
+ return &CreateRepoParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateRepoParamsWithContext creates a new CreateRepoParams object
+// with the ability to set a context for a request.
+func NewCreateRepoParamsWithContext(ctx context.Context) *CreateRepoParams {
+ return &CreateRepoParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateRepoParamsWithHTTPClient creates a new CreateRepoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateRepoParamsWithHTTPClient(client *http.Client) *CreateRepoParams {
+ return &CreateRepoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateRepoParams contains all the parameters to send to the API endpoint
+
+ for the create repo operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateRepoParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository.
+ */
+ Body garm_params.CreateRepoParams
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoParams) WithDefaults() *CreateRepoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create repo params
+func (o *CreateRepoParams) WithTimeout(timeout time.Duration) *CreateRepoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create repo params
+func (o *CreateRepoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create repo params
+func (o *CreateRepoParams) WithContext(ctx context.Context) *CreateRepoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create repo params
+func (o *CreateRepoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create repo params
+func (o *CreateRepoParams) WithHTTPClient(client *http.Client) *CreateRepoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create repo params
+func (o *CreateRepoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create repo params
+func (o *CreateRepoParams) WithBody(body garm_params.CreateRepoParams) *CreateRepoParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create repo params
+func (o *CreateRepoParams) SetBody(body garm_params.CreateRepoParams) {
+ o.Body = body
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateRepoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/create_repo_pool_parameters.go b/client/repositories/create_repo_pool_parameters.go
new file mode 100644
index 00000000..d4b7fa40
--- /dev/null
+++ b/client/repositories/create_repo_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateRepoPoolParams creates a new CreateRepoPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateRepoPoolParams() *CreateRepoPoolParams {
+ return &CreateRepoPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateRepoPoolParamsWithTimeout creates a new CreateRepoPoolParams object
+// with the ability to set a timeout on a request.
+func NewCreateRepoPoolParamsWithTimeout(timeout time.Duration) *CreateRepoPoolParams {
+ return &CreateRepoPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateRepoPoolParamsWithContext creates a new CreateRepoPoolParams object
+// with the ability to set a context for a request.
+func NewCreateRepoPoolParamsWithContext(ctx context.Context) *CreateRepoPoolParams {
+ return &CreateRepoPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateRepoPoolParamsWithHTTPClient creates a new CreateRepoPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateRepoPoolParamsWithHTTPClient(client *http.Client) *CreateRepoPoolParams {
+ return &CreateRepoPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateRepoPoolParams contains all the parameters to send to the API endpoint
+
+ for the create repo pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateRepoPoolParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository pool.
+ */
+ Body garm_params.CreatePoolParams
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoPoolParams) WithDefaults() *CreateRepoPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create repo pool params
+func (o *CreateRepoPoolParams) WithTimeout(timeout time.Duration) *CreateRepoPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create repo pool params
+func (o *CreateRepoPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create repo pool params
+func (o *CreateRepoPoolParams) WithContext(ctx context.Context) *CreateRepoPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create repo pool params
+func (o *CreateRepoPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create repo pool params
+func (o *CreateRepoPoolParams) WithHTTPClient(client *http.Client) *CreateRepoPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create repo pool params
+func (o *CreateRepoPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create repo pool params
+func (o *CreateRepoPoolParams) WithBody(body garm_params.CreatePoolParams) *CreateRepoPoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create repo pool params
+func (o *CreateRepoPoolParams) SetBody(body garm_params.CreatePoolParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the create repo pool params
+func (o *CreateRepoPoolParams) WithRepoID(repoID string) *CreateRepoPoolParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the create repo pool params
+func (o *CreateRepoPoolParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateRepoPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/create_repo_pool_responses.go b/client/repositories/create_repo_pool_responses.go
new file mode 100644
index 00000000..2008f1e5
--- /dev/null
+++ b/client/repositories/create_repo_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateRepoPoolReader is a Reader for the CreateRepoPool structure.
+type CreateRepoPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateRepoPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateRepoPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateRepoPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateRepoPoolOK creates a CreateRepoPoolOK with default headers values
+func NewCreateRepoPoolOK() *CreateRepoPoolOK {
+ return &CreateRepoPoolOK{}
+}
+
+/*
+CreateRepoPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type CreateRepoPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this create repo pool o k response has a 2xx status code
+func (o *CreateRepoPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create repo pool o k response has a 3xx status code
+func (o *CreateRepoPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create repo pool o k response has a 4xx status code
+func (o *CreateRepoPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create repo pool o k response has a 5xx status code
+func (o *CreateRepoPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create repo pool o k response a status code equal to that given
+func (o *CreateRepoPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create repo pool o k response
+func (o *CreateRepoPoolOK) Code() int {
+ return 200
+}
+
+func (o *CreateRepoPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %s", 200, payload)
+}
+
+func (o *CreateRepoPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] createRepoPoolOK %s", 200, payload)
+}
+
+func (o *CreateRepoPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *CreateRepoPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateRepoPoolDefault creates a CreateRepoPoolDefault with default headers values
+func NewCreateRepoPoolDefault(code int) *CreateRepoPoolDefault {
+ return &CreateRepoPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateRepoPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateRepoPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create repo pool default response has a 2xx status code
+func (o *CreateRepoPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create repo pool default response has a 3xx status code
+func (o *CreateRepoPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create repo pool default response has a 4xx status code
+func (o *CreateRepoPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create repo pool default response has a 5xx status code
+func (o *CreateRepoPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create repo pool default response a status code equal to that given
+func (o *CreateRepoPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create repo pool default response
+func (o *CreateRepoPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateRepoPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/pools][%d] CreateRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateRepoPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/create_repo_responses.go b/client/repositories/create_repo_responses.go
new file mode 100644
index 00000000..c5556097
--- /dev/null
+++ b/client/repositories/create_repo_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateRepoReader is a Reader for the CreateRepo structure.
+type CreateRepoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateRepoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateRepoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateRepoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateRepoOK creates a CreateRepoOK with default headers values
+func NewCreateRepoOK() *CreateRepoOK {
+ return &CreateRepoOK{}
+}
+
+/*
+CreateRepoOK describes a response with status code 200, with default header values.
+
+Repository
+*/
+type CreateRepoOK struct {
+ Payload garm_params.Repository
+}
+
+// IsSuccess returns true when this create repo o k response has a 2xx status code
+func (o *CreateRepoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create repo o k response has a 3xx status code
+func (o *CreateRepoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create repo o k response has a 4xx status code
+func (o *CreateRepoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create repo o k response has a 5xx status code
+func (o *CreateRepoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create repo o k response a status code equal to that given
+func (o *CreateRepoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create repo o k response
+func (o *CreateRepoOK) Code() int {
+ return 200
+}
+
+func (o *CreateRepoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] createRepoOK %s", 200, payload)
+}
+
+func (o *CreateRepoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] createRepoOK %s", 200, payload)
+}
+
+func (o *CreateRepoOK) GetPayload() garm_params.Repository {
+ return o.Payload
+}
+
+func (o *CreateRepoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateRepoDefault creates a CreateRepoDefault with default headers values
+func NewCreateRepoDefault(code int) *CreateRepoDefault {
+ return &CreateRepoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateRepoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateRepoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create repo default response has a 2xx status code
+func (o *CreateRepoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create repo default response has a 3xx status code
+func (o *CreateRepoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create repo default response has a 4xx status code
+func (o *CreateRepoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create repo default response has a 5xx status code
+func (o *CreateRepoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create repo default response a status code equal to that given
+func (o *CreateRepoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create repo default response
+func (o *CreateRepoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateRepoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories][%d] CreateRepo default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateRepoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/create_repo_scale_set_parameters.go b/client/repositories/create_repo_scale_set_parameters.go
new file mode 100644
index 00000000..9b8784dc
--- /dev/null
+++ b/client/repositories/create_repo_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewCreateRepoScaleSetParams creates a new CreateRepoScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewCreateRepoScaleSetParams() *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithTimeout creates a new CreateRepoScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewCreateRepoScaleSetParamsWithTimeout(timeout time.Duration) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithContext creates a new CreateRepoScaleSetParams object
+// with the ability to set a context for a request.
+func NewCreateRepoScaleSetParamsWithContext(ctx context.Context) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewCreateRepoScaleSetParamsWithHTTPClient creates a new CreateRepoScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewCreateRepoScaleSetParamsWithHTTPClient(client *http.Client) *CreateRepoScaleSetParams {
+ return &CreateRepoScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+CreateRepoScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the create repo scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type CreateRepoScaleSetParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository scale set.
+ */
+ Body garm_params.CreateScaleSetParams
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the create repo scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoScaleSetParams) WithDefaults() *CreateRepoScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the create repo scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *CreateRepoScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithTimeout(timeout time.Duration) *CreateRepoScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithContext(ctx context.Context) *CreateRepoScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithHTTPClient(client *http.Client) *CreateRepoScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithBody(body garm_params.CreateScaleSetParams) *CreateRepoScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetBody(body garm_params.CreateScaleSetParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the create repo scale set params
+func (o *CreateRepoScaleSetParams) WithRepoID(repoID string) *CreateRepoScaleSetParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the create repo scale set params
+func (o *CreateRepoScaleSetParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *CreateRepoScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/create_repo_scale_set_responses.go b/client/repositories/create_repo_scale_set_responses.go
new file mode 100644
index 00000000..4d02d5c1
--- /dev/null
+++ b/client/repositories/create_repo_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// CreateRepoScaleSetReader is a Reader for the CreateRepoScaleSet structure.
+type CreateRepoScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *CreateRepoScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewCreateRepoScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewCreateRepoScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewCreateRepoScaleSetOK creates a CreateRepoScaleSetOK with default headers values
+func NewCreateRepoScaleSetOK() *CreateRepoScaleSetOK {
+ return &CreateRepoScaleSetOK{}
+}
+
+/*
+CreateRepoScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type CreateRepoScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this create repo scale set o k response has a 2xx status code
+func (o *CreateRepoScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this create repo scale set o k response has a 3xx status code
+func (o *CreateRepoScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this create repo scale set o k response has a 4xx status code
+func (o *CreateRepoScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this create repo scale set o k response has a 5xx status code
+func (o *CreateRepoScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this create repo scale set o k response a status code equal to that given
+func (o *CreateRepoScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the create repo scale set o k response
+func (o *CreateRepoScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *CreateRepoScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] createRepoScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateRepoScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] createRepoScaleSetOK %s", 200, payload)
+}
+
+func (o *CreateRepoScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *CreateRepoScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewCreateRepoScaleSetDefault creates a CreateRepoScaleSetDefault with default headers values
+func NewCreateRepoScaleSetDefault(code int) *CreateRepoScaleSetDefault {
+ return &CreateRepoScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+CreateRepoScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type CreateRepoScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this create repo scale set default response has a 2xx status code
+func (o *CreateRepoScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this create repo scale set default response has a 3xx status code
+func (o *CreateRepoScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this create repo scale set default response has a 4xx status code
+func (o *CreateRepoScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this create repo scale set default response has a 5xx status code
+func (o *CreateRepoScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this create repo scale set default response a status code equal to that given
+func (o *CreateRepoScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the create repo scale set default response
+func (o *CreateRepoScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *CreateRepoScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] CreateRepoScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/scalesets][%d] CreateRepoScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *CreateRepoScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *CreateRepoScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/delete_repo_parameters.go b/client/repositories/delete_repo_parameters.go
new file mode 100644
index 00000000..8bfd54eb
--- /dev/null
+++ b/client/repositories/delete_repo_parameters.go
@@ -0,0 +1,186 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteRepoParams creates a new DeleteRepoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteRepoParams() *DeleteRepoParams {
+ return &DeleteRepoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteRepoParamsWithTimeout creates a new DeleteRepoParams object
+// with the ability to set a timeout on a request.
+func NewDeleteRepoParamsWithTimeout(timeout time.Duration) *DeleteRepoParams {
+ return &DeleteRepoParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteRepoParamsWithContext creates a new DeleteRepoParams object
+// with the ability to set a context for a request.
+func NewDeleteRepoParamsWithContext(ctx context.Context) *DeleteRepoParams {
+ return &DeleteRepoParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteRepoParamsWithHTTPClient creates a new DeleteRepoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteRepoParamsWithHTTPClient(client *http.Client) *DeleteRepoParams {
+ return &DeleteRepoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteRepoParams contains all the parameters to send to the API endpoint
+
+ for the delete repo operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteRepoParams struct {
+
+ /* KeepWebhook.
+
+ If true and a webhook is installed for this repo, it will not be removed.
+ */
+ KeepWebhook *bool
+
+ /* RepoID.
+
+ ID of the repository to delete.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRepoParams) WithDefaults() *DeleteRepoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRepoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete repo params
+func (o *DeleteRepoParams) WithTimeout(timeout time.Duration) *DeleteRepoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete repo params
+func (o *DeleteRepoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete repo params
+func (o *DeleteRepoParams) WithContext(ctx context.Context) *DeleteRepoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete repo params
+func (o *DeleteRepoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete repo params
+func (o *DeleteRepoParams) WithHTTPClient(client *http.Client) *DeleteRepoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete repo params
+func (o *DeleteRepoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithKeepWebhook adds the keepWebhook to the delete repo params
+func (o *DeleteRepoParams) WithKeepWebhook(keepWebhook *bool) *DeleteRepoParams {
+ o.SetKeepWebhook(keepWebhook)
+ return o
+}
+
+// SetKeepWebhook adds the keepWebhook to the delete repo params
+func (o *DeleteRepoParams) SetKeepWebhook(keepWebhook *bool) {
+ o.KeepWebhook = keepWebhook
+}
+
+// WithRepoID adds the repoID to the delete repo params
+func (o *DeleteRepoParams) WithRepoID(repoID string) *DeleteRepoParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the delete repo params
+func (o *DeleteRepoParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteRepoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.KeepWebhook != nil {
+
+ // query param keepWebhook
+ var qrKeepWebhook bool
+
+ if o.KeepWebhook != nil {
+ qrKeepWebhook = *o.KeepWebhook
+ }
+ qKeepWebhook := swag.FormatBool(qrKeepWebhook)
+ if qKeepWebhook != "" {
+
+ if err := r.SetQueryParam("keepWebhook", qKeepWebhook); err != nil {
+ return err
+ }
+ }
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/delete_repo_pool_parameters.go b/client/repositories/delete_repo_pool_parameters.go
new file mode 100644
index 00000000..98aceac0
--- /dev/null
+++ b/client/repositories/delete_repo_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteRepoPoolParams creates a new DeleteRepoPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteRepoPoolParams() *DeleteRepoPoolParams {
+ return &DeleteRepoPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteRepoPoolParamsWithTimeout creates a new DeleteRepoPoolParams object
+// with the ability to set a timeout on a request.
+func NewDeleteRepoPoolParamsWithTimeout(timeout time.Duration) *DeleteRepoPoolParams {
+ return &DeleteRepoPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteRepoPoolParamsWithContext creates a new DeleteRepoPoolParams object
+// with the ability to set a context for a request.
+func NewDeleteRepoPoolParamsWithContext(ctx context.Context) *DeleteRepoPoolParams {
+ return &DeleteRepoPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteRepoPoolParamsWithHTTPClient creates a new DeleteRepoPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteRepoPoolParamsWithHTTPClient(client *http.Client) *DeleteRepoPoolParams {
+ return &DeleteRepoPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteRepoPoolParams contains all the parameters to send to the API endpoint
+
+ for the delete repo pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteRepoPoolParams struct {
+
+ /* PoolID.
+
+ ID of the repository pool to delete.
+ */
+ PoolID string
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRepoPoolParams) WithDefaults() *DeleteRepoPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRepoPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete repo pool params
+func (o *DeleteRepoPoolParams) WithTimeout(timeout time.Duration) *DeleteRepoPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete repo pool params
+func (o *DeleteRepoPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete repo pool params
+func (o *DeleteRepoPoolParams) WithContext(ctx context.Context) *DeleteRepoPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete repo pool params
+func (o *DeleteRepoPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete repo pool params
+func (o *DeleteRepoPoolParams) WithHTTPClient(client *http.Client) *DeleteRepoPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete repo pool params
+func (o *DeleteRepoPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPoolID adds the poolID to the delete repo pool params
+func (o *DeleteRepoPoolParams) WithPoolID(poolID string) *DeleteRepoPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the delete repo pool params
+func (o *DeleteRepoPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WithRepoID adds the repoID to the delete repo pool params
+func (o *DeleteRepoPoolParams) WithRepoID(repoID string) *DeleteRepoPoolParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the delete repo pool params
+func (o *DeleteRepoPoolParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteRepoPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/delete_repo_pool_responses.go b/client/repositories/delete_repo_pool_responses.go
new file mode 100644
index 00000000..dd34884f
--- /dev/null
+++ b/client/repositories/delete_repo_pool_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteRepoPoolReader is a Reader for the DeleteRepoPool structure.
+type DeleteRepoPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteRepoPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteRepoPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteRepoPoolDefault creates a DeleteRepoPoolDefault with default headers values
+func NewDeleteRepoPoolDefault(code int) *DeleteRepoPoolDefault {
+ return &DeleteRepoPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteRepoPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteRepoPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete repo pool default response has a 2xx status code
+func (o *DeleteRepoPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete repo pool default response has a 3xx status code
+func (o *DeleteRepoPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete repo pool default response has a 4xx status code
+func (o *DeleteRepoPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete repo pool default response has a 5xx status code
+func (o *DeleteRepoPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete repo pool default response has a status code equal to that given
+func (o *DeleteRepoPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete repo pool default response
+func (o *DeleteRepoPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteRepoPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteRepoPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/pools/{poolID}][%d] DeleteRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *DeleteRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteRepoPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/delete_repo_responses.go b/client/repositories/delete_repo_responses.go
new file mode 100644
index 00000000..195d46e7
--- /dev/null
+++ b/client/repositories/delete_repo_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteRepoReader is a Reader for the DeleteRepo structure.
+type DeleteRepoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteRepoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteRepoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteRepoDefault creates a DeleteRepoDefault with default headers values
+func NewDeleteRepoDefault(code int) *DeleteRepoDefault {
+ return &DeleteRepoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteRepoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteRepoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete repo default response has a 2xx status code
+func (o *DeleteRepoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete repo default response has a 3xx status code
+func (o *DeleteRepoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete repo default response has a 4xx status code
+func (o *DeleteRepoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete repo default response has a 5xx status code
+func (o *DeleteRepoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete repo default response has a status code equal to that given
+func (o *DeleteRepoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete repo default response
+func (o *DeleteRepoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteRepoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %s", o._statusCode, payload)
+}
+
+func (o *DeleteRepoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}][%d] DeleteRepo default %s", o._statusCode, payload)
+}
+
+func (o *DeleteRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteRepoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/get_repo_parameters.go b/client/repositories/get_repo_parameters.go
new file mode 100644
index 00000000..5bbd2555
--- /dev/null
+++ b/client/repositories/get_repo_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRepoParams creates a new GetRepoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRepoParams() *GetRepoParams {
+ return &GetRepoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRepoParamsWithTimeout creates a new GetRepoParams object
+// with the ability to set a timeout on a request.
+func NewGetRepoParamsWithTimeout(timeout time.Duration) *GetRepoParams {
+ return &GetRepoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRepoParamsWithContext creates a new GetRepoParams object
+// with the ability to set a context for a request.
+func NewGetRepoParamsWithContext(ctx context.Context) *GetRepoParams {
+ return &GetRepoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRepoParamsWithHTTPClient creates a new GetRepoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRepoParamsWithHTTPClient(client *http.Client) *GetRepoParams {
+ return &GetRepoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRepoParams contains all the parameters to send to the API endpoint
+
+ for the get repo operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRepoParams struct {
+
+ /* RepoID.
+
+ ID of the repository to fetch.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoParams) WithDefaults() *GetRepoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get repo params
+func (o *GetRepoParams) WithTimeout(timeout time.Duration) *GetRepoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get repo params
+func (o *GetRepoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get repo params
+func (o *GetRepoParams) WithContext(ctx context.Context) *GetRepoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get repo params
+func (o *GetRepoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get repo params
+func (o *GetRepoParams) WithHTTPClient(client *http.Client) *GetRepoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get repo params
+func (o *GetRepoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the get repo params
+func (o *GetRepoParams) WithRepoID(repoID string) *GetRepoParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the get repo params
+func (o *GetRepoParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRepoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/get_repo_pool_parameters.go b/client/repositories/get_repo_pool_parameters.go
new file mode 100644
index 00000000..122b8b71
--- /dev/null
+++ b/client/repositories/get_repo_pool_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRepoPoolParams creates a new GetRepoPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRepoPoolParams() *GetRepoPoolParams {
+ return &GetRepoPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRepoPoolParamsWithTimeout creates a new GetRepoPoolParams object
+// with the ability to set a timeout on a request.
+func NewGetRepoPoolParamsWithTimeout(timeout time.Duration) *GetRepoPoolParams {
+ return &GetRepoPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRepoPoolParamsWithContext creates a new GetRepoPoolParams object
+// with the ability to set a context for a request.
+func NewGetRepoPoolParamsWithContext(ctx context.Context) *GetRepoPoolParams {
+ return &GetRepoPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRepoPoolParamsWithHTTPClient creates a new GetRepoPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRepoPoolParamsWithHTTPClient(client *http.Client) *GetRepoPoolParams {
+ return &GetRepoPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRepoPoolParams contains all the parameters to send to the API endpoint
+
+ for the get repo pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRepoPoolParams struct {
+
+ /* PoolID.
+
+ Pool ID.
+ */
+ PoolID string
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoPoolParams) WithDefaults() *GetRepoPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get repo pool params
+func (o *GetRepoPoolParams) WithTimeout(timeout time.Duration) *GetRepoPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get repo pool params
+func (o *GetRepoPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get repo pool params
+func (o *GetRepoPoolParams) WithContext(ctx context.Context) *GetRepoPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get repo pool params
+func (o *GetRepoPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get repo pool params
+func (o *GetRepoPoolParams) WithHTTPClient(client *http.Client) *GetRepoPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get repo pool params
+func (o *GetRepoPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPoolID adds the poolID to the get repo pool params
+func (o *GetRepoPoolParams) WithPoolID(poolID string) *GetRepoPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the get repo pool params
+func (o *GetRepoPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WithRepoID adds the repoID to the get repo pool params
+func (o *GetRepoPoolParams) WithRepoID(repoID string) *GetRepoPoolParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the get repo pool params
+func (o *GetRepoPoolParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRepoPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/get_repo_pool_responses.go b/client/repositories/get_repo_pool_responses.go
new file mode 100644
index 00000000..eb6e73d3
--- /dev/null
+++ b/client/repositories/get_repo_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetRepoPoolReader is a Reader for the GetRepoPool structure.
+type GetRepoPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRepoPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRepoPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetRepoPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetRepoPoolOK creates a GetRepoPoolOK with default headers values
+func NewGetRepoPoolOK() *GetRepoPoolOK {
+ return &GetRepoPoolOK{}
+}
+
+/*
+GetRepoPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type GetRepoPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this get repo pool o k response has a 2xx status code
+func (o *GetRepoPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get repo pool o k response has a 3xx status code
+func (o *GetRepoPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get repo pool o k response has a 4xx status code
+func (o *GetRepoPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get repo pool o k response has a 5xx status code
+func (o *GetRepoPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get repo pool o k response has a status code equal to that given
+func (o *GetRepoPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get repo pool o k response
+func (o *GetRepoPoolOK) Code() int {
+ return 200
+}
+
+func (o *GetRepoPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %s", 200, payload)
+}
+
+func (o *GetRepoPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] getRepoPoolOK %s", 200, payload)
+}
+
+func (o *GetRepoPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *GetRepoPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetRepoPoolDefault creates a GetRepoPoolDefault with default headers values
+func NewGetRepoPoolDefault(code int) *GetRepoPoolDefault {
+ return &GetRepoPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetRepoPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetRepoPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get repo pool default response has a 2xx status code
+func (o *GetRepoPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get repo pool default response has a 3xx status code
+func (o *GetRepoPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get repo pool default response has a 4xx status code
+func (o *GetRepoPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get repo pool default response has a 5xx status code
+func (o *GetRepoPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get repo pool default response has a status code equal to that given
+func (o *GetRepoPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get repo pool default response
+func (o *GetRepoPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetRepoPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools/{poolID}][%d] GetRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetRepoPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/get_repo_responses.go b/client/repositories/get_repo_responses.go
new file mode 100644
index 00000000..70f25fc8
--- /dev/null
+++ b/client/repositories/get_repo_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetRepoReader is a Reader for the GetRepo structure.
+type GetRepoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRepoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRepoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetRepoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetRepoOK creates a GetRepoOK with default headers values
+func NewGetRepoOK() *GetRepoOK {
+ return &GetRepoOK{}
+}
+
+/*
+GetRepoOK describes a response with status code 200, with default header values.
+
+Repository
+*/
+type GetRepoOK struct {
+ Payload garm_params.Repository
+}
+
+// IsSuccess returns true when this get repo o k response has a 2xx status code
+func (o *GetRepoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get repo o k response has a 3xx status code
+func (o *GetRepoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get repo o k response has a 4xx status code
+func (o *GetRepoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get repo o k response has a 5xx status code
+func (o *GetRepoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get repo o k response a status code equal to that given
+func (o *GetRepoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get repo o k response
+func (o *GetRepoOK) Code() int {
+ return 200
+}
+
+func (o *GetRepoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %s", 200, payload)
+}
+
+func (o *GetRepoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] getRepoOK %s", 200, payload)
+}
+
+func (o *GetRepoOK) GetPayload() garm_params.Repository {
+ return o.Payload
+}
+
+func (o *GetRepoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetRepoDefault creates a GetRepoDefault with default headers values
+func NewGetRepoDefault(code int) *GetRepoDefault {
+ return &GetRepoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetRepoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetRepoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get repo default response has a 2xx status code
+func (o *GetRepoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get repo default response has a 3xx status code
+func (o *GetRepoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get repo default response has a 4xx status code
+func (o *GetRepoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get repo default response has a 5xx status code
+func (o *GetRepoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get repo default response a status code equal to that given
+func (o *GetRepoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get repo default response
+func (o *GetRepoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetRepoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}][%d] GetRepo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetRepoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/get_repo_webhook_info_parameters.go b/client/repositories/get_repo_webhook_info_parameters.go
new file mode 100644
index 00000000..b4c9e515
--- /dev/null
+++ b/client/repositories/get_repo_webhook_info_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRepoWebhookInfoParams creates a new GetRepoWebhookInfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRepoWebhookInfoParams() *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithTimeout creates a new GetRepoWebhookInfoParams object
+// with the ability to set a timeout on a request.
+func NewGetRepoWebhookInfoParamsWithTimeout(timeout time.Duration) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithContext creates a new GetRepoWebhookInfoParams object
+// with the ability to set a context for a request.
+func NewGetRepoWebhookInfoParamsWithContext(ctx context.Context) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRepoWebhookInfoParamsWithHTTPClient creates a new GetRepoWebhookInfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRepoWebhookInfoParamsWithHTTPClient(client *http.Client) *GetRepoWebhookInfoParams {
+ return &GetRepoWebhookInfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRepoWebhookInfoParams contains all the parameters to send to the API endpoint
+
+ for the get repo webhook info operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRepoWebhookInfoParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get repo webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoWebhookInfoParams) WithDefaults() *GetRepoWebhookInfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get repo webhook info params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRepoWebhookInfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithTimeout(timeout time.Duration) *GetRepoWebhookInfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithContext(ctx context.Context) *GetRepoWebhookInfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithHTTPClient(client *http.Client) *GetRepoWebhookInfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) WithRepoID(repoID string) *GetRepoWebhookInfoParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the get repo webhook info params
+func (o *GetRepoWebhookInfoParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRepoWebhookInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/get_repo_webhook_info_responses.go b/client/repositories/get_repo_webhook_info_responses.go
new file mode 100644
index 00000000..c72d3815
--- /dev/null
+++ b/client/repositories/get_repo_webhook_info_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetRepoWebhookInfoReader is a Reader for the GetRepoWebhookInfo structure.
+type GetRepoWebhookInfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRepoWebhookInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRepoWebhookInfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetRepoWebhookInfoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetRepoWebhookInfoOK creates a GetRepoWebhookInfoOK with default headers values
+func NewGetRepoWebhookInfoOK() *GetRepoWebhookInfoOK {
+ return &GetRepoWebhookInfoOK{}
+}
+
+/*
+GetRepoWebhookInfoOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type GetRepoWebhookInfoOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this get repo webhook info o k response has a 2xx status code
+func (o *GetRepoWebhookInfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get repo webhook info o k response has a 3xx status code
+func (o *GetRepoWebhookInfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get repo webhook info o k response has a 4xx status code
+func (o *GetRepoWebhookInfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get repo webhook info o k response has a 5xx status code
+func (o *GetRepoWebhookInfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get repo webhook info o k response a status code equal to that given
+func (o *GetRepoWebhookInfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get repo webhook info o k response
+func (o *GetRepoWebhookInfoOK) Code() int {
+ return 200
+}
+
+func (o *GetRepoWebhookInfoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] getRepoWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetRepoWebhookInfoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] getRepoWebhookInfoOK %s", 200, payload)
+}
+
+func (o *GetRepoWebhookInfoOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *GetRepoWebhookInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetRepoWebhookInfoDefault creates a GetRepoWebhookInfoDefault with default headers values
+func NewGetRepoWebhookInfoDefault(code int) *GetRepoWebhookInfoDefault {
+ return &GetRepoWebhookInfoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetRepoWebhookInfoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetRepoWebhookInfoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get repo webhook info default response has a 2xx status code
+func (o *GetRepoWebhookInfoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get repo webhook info default response has a 3xx status code
+func (o *GetRepoWebhookInfoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get repo webhook info default response has a 4xx status code
+func (o *GetRepoWebhookInfoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get repo webhook info default response has a 5xx status code
+func (o *GetRepoWebhookInfoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get repo webhook info default response a status code equal to that given
+func (o *GetRepoWebhookInfoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get repo webhook info default response
+func (o *GetRepoWebhookInfoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetRepoWebhookInfoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] GetRepoWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoWebhookInfoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/webhook][%d] GetRepoWebhookInfo default %s", o._statusCode, payload)
+}
+
+func (o *GetRepoWebhookInfoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetRepoWebhookInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/install_repo_webhook_parameters.go b/client/repositories/install_repo_webhook_parameters.go
new file mode 100644
index 00000000..933ff1b6
--- /dev/null
+++ b/client/repositories/install_repo_webhook_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewInstallRepoWebhookParams creates a new InstallRepoWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewInstallRepoWebhookParams() *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithTimeout creates a new InstallRepoWebhookParams object
+// with the ability to set a timeout on a request.
+func NewInstallRepoWebhookParamsWithTimeout(timeout time.Duration) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithContext creates a new InstallRepoWebhookParams object
+// with the ability to set a context for a request.
+func NewInstallRepoWebhookParamsWithContext(ctx context.Context) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewInstallRepoWebhookParamsWithHTTPClient creates a new InstallRepoWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewInstallRepoWebhookParamsWithHTTPClient(client *http.Client) *InstallRepoWebhookParams {
+ return &InstallRepoWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+InstallRepoWebhookParams contains all the parameters to send to the API endpoint
+
+ for the install repo webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type InstallRepoWebhookParams struct {
+
+ /* Body.
+
+ Parameters used when creating the repository webhook.
+ */
+ Body garm_params.InstallWebhookParams
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the install repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallRepoWebhookParams) WithDefaults() *InstallRepoWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the install repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *InstallRepoWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithTimeout(timeout time.Duration) *InstallRepoWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithContext(ctx context.Context) *InstallRepoWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithHTTPClient(client *http.Client) *InstallRepoWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithBody(body garm_params.InstallWebhookParams) *InstallRepoWebhookParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetBody(body garm_params.InstallWebhookParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the install repo webhook params
+func (o *InstallRepoWebhookParams) WithRepoID(repoID string) *InstallRepoWebhookParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the install repo webhook params
+func (o *InstallRepoWebhookParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *InstallRepoWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/install_repo_webhook_responses.go b/client/repositories/install_repo_webhook_responses.go
new file mode 100644
index 00000000..c8690bcc
--- /dev/null
+++ b/client/repositories/install_repo_webhook_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// InstallRepoWebhookReader is a Reader for the InstallRepoWebhook structure.
+type InstallRepoWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *InstallRepoWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewInstallRepoWebhookOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewInstallRepoWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewInstallRepoWebhookOK creates a InstallRepoWebhookOK with default headers values
+func NewInstallRepoWebhookOK() *InstallRepoWebhookOK {
+ return &InstallRepoWebhookOK{}
+}
+
+/*
+InstallRepoWebhookOK describes a response with status code 200, with default header values.
+
+HookInfo
+*/
+type InstallRepoWebhookOK struct {
+ Payload garm_params.HookInfo
+}
+
+// IsSuccess returns true when this install repo webhook o k response has a 2xx status code
+func (o *InstallRepoWebhookOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this install repo webhook o k response has a 3xx status code
+func (o *InstallRepoWebhookOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this install repo webhook o k response has a 4xx status code
+func (o *InstallRepoWebhookOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this install repo webhook o k response has a 5xx status code
+func (o *InstallRepoWebhookOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this install repo webhook o k response a status code equal to that given
+func (o *InstallRepoWebhookOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the install repo webhook o k response
+func (o *InstallRepoWebhookOK) Code() int {
+ return 200
+}
+
+func (o *InstallRepoWebhookOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] installRepoWebhookOK %s", 200, payload)
+}
+
+func (o *InstallRepoWebhookOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] installRepoWebhookOK %s", 200, payload)
+}
+
+func (o *InstallRepoWebhookOK) GetPayload() garm_params.HookInfo {
+ return o.Payload
+}
+
+func (o *InstallRepoWebhookOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewInstallRepoWebhookDefault creates a InstallRepoWebhookDefault with default headers values
+func NewInstallRepoWebhookDefault(code int) *InstallRepoWebhookDefault {
+ return &InstallRepoWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+InstallRepoWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type InstallRepoWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this install repo webhook default response has a 2xx status code
+func (o *InstallRepoWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this install repo webhook default response has a 3xx status code
+func (o *InstallRepoWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this install repo webhook default response has a 4xx status code
+func (o *InstallRepoWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this install repo webhook default response has a 5xx status code
+func (o *InstallRepoWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this install repo webhook default response a status code equal to that given
+func (o *InstallRepoWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the install repo webhook default response
+func (o *InstallRepoWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *InstallRepoWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] InstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallRepoWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[POST /repositories/{repoID}/webhook][%d] InstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *InstallRepoWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *InstallRepoWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/list_repo_instances_parameters.go b/client/repositories/list_repo_instances_parameters.go
new file mode 100644
index 00000000..59f280b8
--- /dev/null
+++ b/client/repositories/list_repo_instances_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListRepoInstancesParams creates a new ListRepoInstancesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListRepoInstancesParams() *ListRepoInstancesParams {
+ return &ListRepoInstancesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListRepoInstancesParamsWithTimeout creates a new ListRepoInstancesParams object
+// with the ability to set a timeout on a request.
+func NewListRepoInstancesParamsWithTimeout(timeout time.Duration) *ListRepoInstancesParams {
+ return &ListRepoInstancesParams{
+ timeout: timeout,
+ }
+}
+
+// NewListRepoInstancesParamsWithContext creates a new ListRepoInstancesParams object
+// with the ability to set a context for a request.
+func NewListRepoInstancesParamsWithContext(ctx context.Context) *ListRepoInstancesParams {
+ return &ListRepoInstancesParams{
+ Context: ctx,
+ }
+}
+
+// NewListRepoInstancesParamsWithHTTPClient creates a new ListRepoInstancesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListRepoInstancesParamsWithHTTPClient(client *http.Client) *ListRepoInstancesParams {
+ return &ListRepoInstancesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListRepoInstancesParams contains all the parameters to send to the API endpoint
+
+ for the list repo instances operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListRepoInstancesParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list repo instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoInstancesParams) WithDefaults() *ListRepoInstancesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list repo instances params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoInstancesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list repo instances params
+func (o *ListRepoInstancesParams) WithTimeout(timeout time.Duration) *ListRepoInstancesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list repo instances params
+func (o *ListRepoInstancesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list repo instances params
+func (o *ListRepoInstancesParams) WithContext(ctx context.Context) *ListRepoInstancesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list repo instances params
+func (o *ListRepoInstancesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list repo instances params
+func (o *ListRepoInstancesParams) WithHTTPClient(client *http.Client) *ListRepoInstancesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list repo instances params
+func (o *ListRepoInstancesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the list repo instances params
+func (o *ListRepoInstancesParams) WithRepoID(repoID string) *ListRepoInstancesParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the list repo instances params
+func (o *ListRepoInstancesParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListRepoInstancesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/list_repo_instances_responses.go b/client/repositories/list_repo_instances_responses.go
new file mode 100644
index 00000000..5c49b701
--- /dev/null
+++ b/client/repositories/list_repo_instances_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListRepoInstancesReader is a Reader for the ListRepoInstances structure.
+type ListRepoInstancesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListRepoInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListRepoInstancesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListRepoInstancesDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListRepoInstancesOK creates a ListRepoInstancesOK with default headers values
+func NewListRepoInstancesOK() *ListRepoInstancesOK {
+ return &ListRepoInstancesOK{}
+}
+
+/*
+ListRepoInstancesOK describes a response with status code 200, with default header values.
+
+Instances
+*/
+type ListRepoInstancesOK struct {
+ Payload garm_params.Instances
+}
+
+// IsSuccess returns true when this list repo instances o k response has a 2xx status code
+func (o *ListRepoInstancesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list repo instances o k response has a 3xx status code
+func (o *ListRepoInstancesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list repo instances o k response has a 4xx status code
+func (o *ListRepoInstancesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list repo instances o k response has a 5xx status code
+func (o *ListRepoInstancesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list repo instances o k response a status code equal to that given
+func (o *ListRepoInstancesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list repo instances o k response
+func (o *ListRepoInstancesOK) Code() int {
+ return 200
+}
+
+func (o *ListRepoInstancesOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %s", 200, payload)
+}
+
+func (o *ListRepoInstancesOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] listRepoInstancesOK %s", 200, payload)
+}
+
+func (o *ListRepoInstancesOK) GetPayload() garm_params.Instances {
+ return o.Payload
+}
+
+func (o *ListRepoInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListRepoInstancesDefault creates a ListRepoInstancesDefault with default headers values
+func NewListRepoInstancesDefault(code int) *ListRepoInstancesDefault {
+ return &ListRepoInstancesDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListRepoInstancesDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListRepoInstancesDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list repo instances default response has a 2xx status code
+func (o *ListRepoInstancesDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list repo instances default response has a 3xx status code
+func (o *ListRepoInstancesDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list repo instances default response has a 4xx status code
+func (o *ListRepoInstancesDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list repo instances default response has a 5xx status code
+func (o *ListRepoInstancesDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list repo instances default response a status code equal to that given
+func (o *ListRepoInstancesDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list repo instances default response
+func (o *ListRepoInstancesDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListRepoInstancesDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoInstancesDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/instances][%d] ListRepoInstances default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoInstancesDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListRepoInstancesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/list_repo_pools_parameters.go b/client/repositories/list_repo_pools_parameters.go
new file mode 100644
index 00000000..97d9d57c
--- /dev/null
+++ b/client/repositories/list_repo_pools_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListRepoPoolsParams creates a new ListRepoPoolsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListRepoPoolsParams() *ListRepoPoolsParams {
+ return &ListRepoPoolsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListRepoPoolsParamsWithTimeout creates a new ListRepoPoolsParams object
+// with the ability to set a timeout on a request.
+func NewListRepoPoolsParamsWithTimeout(timeout time.Duration) *ListRepoPoolsParams {
+ return &ListRepoPoolsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListRepoPoolsParamsWithContext creates a new ListRepoPoolsParams object
+// with the ability to set a context for a request.
+func NewListRepoPoolsParamsWithContext(ctx context.Context) *ListRepoPoolsParams {
+ return &ListRepoPoolsParams{
+ Context: ctx,
+ }
+}
+
+// NewListRepoPoolsParamsWithHTTPClient creates a new ListRepoPoolsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListRepoPoolsParamsWithHTTPClient(client *http.Client) *ListRepoPoolsParams {
+ return &ListRepoPoolsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListRepoPoolsParams contains all the parameters to send to the API endpoint
+
+ for the list repo pools operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListRepoPoolsParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list repo pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoPoolsParams) WithDefaults() *ListRepoPoolsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list repo pools params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoPoolsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list repo pools params
+func (o *ListRepoPoolsParams) WithTimeout(timeout time.Duration) *ListRepoPoolsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list repo pools params
+func (o *ListRepoPoolsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list repo pools params
+func (o *ListRepoPoolsParams) WithContext(ctx context.Context) *ListRepoPoolsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list repo pools params
+func (o *ListRepoPoolsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list repo pools params
+func (o *ListRepoPoolsParams) WithHTTPClient(client *http.Client) *ListRepoPoolsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list repo pools params
+func (o *ListRepoPoolsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the list repo pools params
+func (o *ListRepoPoolsParams) WithRepoID(repoID string) *ListRepoPoolsParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the list repo pools params
+func (o *ListRepoPoolsParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListRepoPoolsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/list_repo_pools_responses.go b/client/repositories/list_repo_pools_responses.go
new file mode 100644
index 00000000..c16777cb
--- /dev/null
+++ b/client/repositories/list_repo_pools_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListRepoPoolsReader is a Reader for the ListRepoPools structure.
+type ListRepoPoolsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListRepoPoolsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListRepoPoolsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListRepoPoolsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListRepoPoolsOK creates a ListRepoPoolsOK with default headers values
+func NewListRepoPoolsOK() *ListRepoPoolsOK {
+ return &ListRepoPoolsOK{}
+}
+
+/*
+ListRepoPoolsOK describes a response with status code 200, with default header values.
+
+Pools
+*/
+type ListRepoPoolsOK struct {
+ Payload garm_params.Pools
+}
+
+// IsSuccess returns true when this list repo pools o k response has a 2xx status code
+func (o *ListRepoPoolsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list repo pools o k response has a 3xx status code
+func (o *ListRepoPoolsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list repo pools o k response has a 4xx status code
+func (o *ListRepoPoolsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list repo pools o k response has a 5xx status code
+func (o *ListRepoPoolsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list repo pools o k response a status code equal to that given
+func (o *ListRepoPoolsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list repo pools o k response
+func (o *ListRepoPoolsOK) Code() int {
+ return 200
+}
+
+func (o *ListRepoPoolsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %s", 200, payload)
+}
+
+func (o *ListRepoPoolsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] listRepoPoolsOK %s", 200, payload)
+}
+
+func (o *ListRepoPoolsOK) GetPayload() garm_params.Pools {
+ return o.Payload
+}
+
+func (o *ListRepoPoolsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListRepoPoolsDefault creates a ListRepoPoolsDefault with default headers values
+func NewListRepoPoolsDefault(code int) *ListRepoPoolsDefault {
+ return &ListRepoPoolsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListRepoPoolsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListRepoPoolsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list repo pools default response has a 2xx status code
+func (o *ListRepoPoolsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list repo pools default response has a 3xx status code
+func (o *ListRepoPoolsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list repo pools default response has a 4xx status code
+func (o *ListRepoPoolsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list repo pools default response has a 5xx status code
+func (o *ListRepoPoolsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list repo pools default response a status code equal to that given
+func (o *ListRepoPoolsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list repo pools default response
+func (o *ListRepoPoolsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListRepoPoolsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoPoolsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/pools][%d] ListRepoPools default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoPoolsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListRepoPoolsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/list_repo_scale_sets_parameters.go b/client/repositories/list_repo_scale_sets_parameters.go
new file mode 100644
index 00000000..2582c498
--- /dev/null
+++ b/client/repositories/list_repo_scale_sets_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListRepoScaleSetsParams creates a new ListRepoScaleSetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListRepoScaleSetsParams() *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithTimeout creates a new ListRepoScaleSetsParams object
+// with the ability to set a timeout on a request.
+func NewListRepoScaleSetsParamsWithTimeout(timeout time.Duration) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithContext creates a new ListRepoScaleSetsParams object
+// with the ability to set a context for a request.
+func NewListRepoScaleSetsParamsWithContext(ctx context.Context) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListRepoScaleSetsParamsWithHTTPClient creates a new ListRepoScaleSetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListRepoScaleSetsParamsWithHTTPClient(client *http.Client) *ListRepoScaleSetsParams {
+ return &ListRepoScaleSetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListRepoScaleSetsParams contains all the parameters to send to the API endpoint
+
+ for the list repo scale sets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListRepoScaleSetsParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list repo scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoScaleSetsParams) WithDefaults() *ListRepoScaleSetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list repo scale sets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListRepoScaleSetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithTimeout(timeout time.Duration) *ListRepoScaleSetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithContext(ctx context.Context) *ListRepoScaleSetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithHTTPClient(client *http.Client) *ListRepoScaleSetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) WithRepoID(repoID string) *ListRepoScaleSetsParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the list repo scale sets params
+func (o *ListRepoScaleSetsParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListRepoScaleSetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/list_repo_scale_sets_responses.go b/client/repositories/list_repo_scale_sets_responses.go
new file mode 100644
index 00000000..4e2d98a2
--- /dev/null
+++ b/client/repositories/list_repo_scale_sets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListRepoScaleSetsReader is a Reader for the ListRepoScaleSets structure.
+type ListRepoScaleSetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListRepoScaleSetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListRepoScaleSetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListRepoScaleSetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListRepoScaleSetsOK creates a ListRepoScaleSetsOK with default headers values
+func NewListRepoScaleSetsOK() *ListRepoScaleSetsOK {
+ return &ListRepoScaleSetsOK{}
+}
+
+/*
+ListRepoScaleSetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListRepoScaleSetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list repo scale sets o k response has a 2xx status code
+func (o *ListRepoScaleSetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list repo scale sets o k response has a 3xx status code
+func (o *ListRepoScaleSetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list repo scale sets o k response has a 4xx status code
+func (o *ListRepoScaleSetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list repo scale sets o k response has a 5xx status code
+func (o *ListRepoScaleSetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list repo scale sets o k response a status code equal to that given
+func (o *ListRepoScaleSetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list repo scale sets o k response
+func (o *ListRepoScaleSetsOK) Code() int {
+ return 200
+}
+
+func (o *ListRepoScaleSetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] listRepoScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListRepoScaleSetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] listRepoScaleSetsOK %s", 200, payload)
+}
+
+func (o *ListRepoScaleSetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListRepoScaleSetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListRepoScaleSetsDefault creates a ListRepoScaleSetsDefault with default headers values
+func NewListRepoScaleSetsDefault(code int) *ListRepoScaleSetsDefault {
+ return &ListRepoScaleSetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListRepoScaleSetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListRepoScaleSetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list repo scale sets default response has a 2xx status code
+func (o *ListRepoScaleSetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list repo scale sets default response has a 3xx status code
+func (o *ListRepoScaleSetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list repo scale sets default response has a 4xx status code
+func (o *ListRepoScaleSetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list repo scale sets default response has a 5xx status code
+func (o *ListRepoScaleSetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list repo scale sets default response a status code equal to that given
+func (o *ListRepoScaleSetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list repo scale sets default response
+func (o *ListRepoScaleSetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListRepoScaleSetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] ListRepoScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoScaleSetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories/{repoID}/scalesets][%d] ListRepoScaleSets default %s", o._statusCode, payload)
+}
+
+func (o *ListRepoScaleSetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListRepoScaleSetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/list_repos_parameters.go b/client/repositories/list_repos_parameters.go
new file mode 100644
index 00000000..9998a1ba
--- /dev/null
+++ b/client/repositories/list_repos_parameters.go
@@ -0,0 +1,231 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListReposParams creates a new ListReposParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListReposParams() *ListReposParams {
+ return &ListReposParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListReposParamsWithTimeout creates a new ListReposParams object
+// with the ability to set a timeout on a request.
+func NewListReposParamsWithTimeout(timeout time.Duration) *ListReposParams {
+ return &ListReposParams{
+ timeout: timeout,
+ }
+}
+
+// NewListReposParamsWithContext creates a new ListReposParams object
+// with the ability to set a context for a request.
+func NewListReposParamsWithContext(ctx context.Context) *ListReposParams {
+ return &ListReposParams{
+ Context: ctx,
+ }
+}
+
+// NewListReposParamsWithHTTPClient creates a new ListReposParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListReposParamsWithHTTPClient(client *http.Client) *ListReposParams {
+ return &ListReposParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListReposParams contains all the parameters to send to the API endpoint
+
+ for the list repos operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListReposParams struct {
+
+ /* Endpoint.
+
+ Exact endpoint name to filter by
+ */
+ Endpoint *string
+
+ /* Name.
+
+ Exact repository name to filter by
+ */
+ Name *string
+
+ /* Owner.
+
+ Exact owner name to filter by
+ */
+ Owner *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list repos params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListReposParams) WithDefaults() *ListReposParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list repos params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListReposParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list repos params
+func (o *ListReposParams) WithTimeout(timeout time.Duration) *ListReposParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list repos params
+func (o *ListReposParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list repos params
+func (o *ListReposParams) WithContext(ctx context.Context) *ListReposParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list repos params
+func (o *ListReposParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list repos params
+func (o *ListReposParams) WithHTTPClient(client *http.Client) *ListReposParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list repos params
+func (o *ListReposParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the list repos params
+func (o *ListReposParams) WithEndpoint(endpoint *string) *ListReposParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the list repos params
+func (o *ListReposParams) SetEndpoint(endpoint *string) {
+ o.Endpoint = endpoint
+}
+
+// WithName adds the name to the list repos params
+func (o *ListReposParams) WithName(name *string) *ListReposParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the list repos params
+func (o *ListReposParams) SetName(name *string) {
+ o.Name = name
+}
+
+// WithOwner adds the owner to the list repos params
+func (o *ListReposParams) WithOwner(owner *string) *ListReposParams {
+ o.SetOwner(owner)
+ return o
+}
+
+// SetOwner adds the owner to the list repos params
+func (o *ListReposParams) SetOwner(owner *string) {
+ o.Owner = owner
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListReposParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Endpoint != nil {
+
+ // query param endpoint
+ var qrEndpoint string
+
+ if o.Endpoint != nil {
+ qrEndpoint = *o.Endpoint
+ }
+ qEndpoint := qrEndpoint
+ if qEndpoint != "" {
+
+ if err := r.SetQueryParam("endpoint", qEndpoint); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Name != nil {
+
+ // query param name
+ var qrName string
+
+ if o.Name != nil {
+ qrName = *o.Name
+ }
+ qName := qrName
+ if qName != "" {
+
+ if err := r.SetQueryParam("name", qName); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Owner != nil {
+
+ // query param owner
+ var qrOwner string
+
+ if o.Owner != nil {
+ qrOwner = *o.Owner
+ }
+ qOwner := qrOwner
+ if qOwner != "" {
+
+ if err := r.SetQueryParam("owner", qOwner); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/list_repos_responses.go b/client/repositories/list_repos_responses.go
new file mode 100644
index 00000000..a45e2c0d
--- /dev/null
+++ b/client/repositories/list_repos_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListReposReader is a Reader for the ListRepos structure.
+type ListReposReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse decodes the response: 200 -> *ListReposOK; any other 2xx -> *ListReposDefault as success; non-2xx -> *ListReposDefault returned as the error.
+func (o *ListReposReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewListReposOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewListReposDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 { // undocumented 2xx codes are still surfaced as a success value
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewListReposOK creates a ListReposOK with default headers values
+func NewListReposOK() *ListReposOK {
+ return &ListReposOK{}
+}
+
+/*
+ListReposOK describes a response with status code 200, with default header values.
+
+Repositories
+*/
+type ListReposOK struct {
+ Payload garm_params.Repositories
+}
+
+// IsSuccess returns true when this list repos o k response has a 2xx status code
+func (o *ListReposOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list repos o k response has a 3xx status code
+func (o *ListReposOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list repos o k response has a 4xx status code
+func (o *ListReposOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list repos o k response has a 5xx status code
+func (o *ListReposOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list repos o k response a status code equal to that given
+func (o *ListReposOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list repos o k response
+func (o *ListReposOK) Code() int {
+ return 200
+}
+
+func (o *ListReposOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] listReposOK %s", 200, payload)
+}
+
+func (o *ListReposOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] listReposOK %s", 200, payload)
+}
+
+func (o *ListReposOK) GetPayload() garm_params.Repositories {
+ return o.Payload
+}
+
+func (o *ListReposOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListReposDefault creates a ListReposDefault with default headers values
+func NewListReposDefault(code int) *ListReposDefault {
+ return &ListReposDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListReposDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListReposDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list repos default response has a 2xx status code
+func (o *ListReposDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list repos default response has a 3xx status code
+func (o *ListReposDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list repos default response has a 4xx status code
+func (o *ListReposDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list repos default response has a 5xx status code
+func (o *ListReposDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list repos default response a status code equal to that given
+func (o *ListReposDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list repos default response
+func (o *ListReposDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListReposDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] ListRepos default %s", o._statusCode, payload)
+}
+
+func (o *ListReposDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /repositories][%d] ListRepos default %s", o._statusCode, payload)
+}
+
+func (o *ListReposDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListReposDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/repositories_client.go b/client/repositories/repositories_client.go
new file mode 100644
index 00000000..017bf0f8
--- /dev/null
+++ b/client/repositories/repositories_client.go
@@ -0,0 +1,687 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new repositories API client backed by the given transport and formats registry.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+	return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new repositories API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new repositories API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for repositories API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ CreateRepo(params *CreateRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoOK, error)
+
+ CreateRepoPool(params *CreateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoPoolOK, error)
+
+ CreateRepoScaleSet(params *CreateRepoScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoScaleSetOK, error)
+
+ DeleteRepo(params *DeleteRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ DeleteRepoPool(params *DeleteRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetRepo(params *GetRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoOK, error)
+
+ GetRepoPool(params *GetRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoPoolOK, error)
+
+ GetRepoWebhookInfo(params *GetRepoWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoWebhookInfoOK, error)
+
+ InstallRepoWebhook(params *InstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallRepoWebhookOK, error)
+
+ ListRepoInstances(params *ListRepoInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoInstancesOK, error)
+
+ ListRepoPools(params *ListRepoPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoPoolsOK, error)
+
+ ListRepoScaleSets(params *ListRepoScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoScaleSetsOK, error)
+
+ ListRepos(params *ListReposParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListReposOK, error)
+
+ UninstallRepoWebhook(params *UninstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ UpdateRepo(params *UpdateRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoOK, error)
+
+ UpdateRepoPool(params *UpdateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoPoolOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+CreateRepo creates repository with the parameters given
+*/
+func (a *Client) CreateRepo(params *CreateRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateRepoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateRepo",
+ Method: "POST",
+ PathPattern: "/repositories",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateRepoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateRepoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateRepoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateRepoPool creates repository pool with the parameters given
+*/
+func (a *Client) CreateRepoPool(params *CreateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateRepoPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateRepoPool",
+ Method: "POST",
+ PathPattern: "/repositories/{repoID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateRepoPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateRepoPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateRepoPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+CreateRepoScaleSet creates repository scale set with the parameters given
+*/
+func (a *Client) CreateRepoScaleSet(params *CreateRepoScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateRepoScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewCreateRepoScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "CreateRepoScaleSet",
+ Method: "POST",
+ PathPattern: "/repositories/{repoID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &CreateRepoScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*CreateRepoScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*CreateRepoScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+DeleteRepo deletes repository by ID
+*/
+func (a *Client) DeleteRepo(params *DeleteRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteRepoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteRepo",
+ Method: "DELETE",
+ PathPattern: "/repositories/{repoID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteRepoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+DeleteRepoPool deletes repository pool by ID
+*/
+func (a *Client) DeleteRepoPool(params *DeleteRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteRepoPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteRepoPool",
+ Method: "DELETE",
+ PathPattern: "/repositories/{repoID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteRepoPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetRepo gets repository by ID
+*/
+func (a *Client) GetRepo(params *GetRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRepoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRepo",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRepoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRepoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetRepoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetRepoPool gets repository pool by ID
+*/
+func (a *Client) GetRepoPool(params *GetRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRepoPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRepoPool",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRepoPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRepoPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetRepoPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GetRepoWebhookInfo gets information about the GARM-installed webhook on a repository
+*/
+func (a *Client) GetRepoWebhookInfo(params *GetRepoWebhookInfoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetRepoWebhookInfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRepoWebhookInfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRepoWebhookInfo",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRepoWebhookInfoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRepoWebhookInfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetRepoWebhookInfoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+	InstallRepoWebhook Install the GARM webhook for a repository. The secret configured on the repository will
+
+be used to validate the requests.
+*/
+func (a *Client) InstallRepoWebhook(params *InstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*InstallRepoWebhookOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewInstallRepoWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "InstallRepoWebhook",
+ Method: "POST",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &InstallRepoWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*InstallRepoWebhookOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*InstallRepoWebhookDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListRepoInstances lists repository instances
+*/
+func (a *Client) ListRepoInstances(params *ListRepoInstancesParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoInstancesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListRepoInstancesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListRepoInstances",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/instances",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListRepoInstancesReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListRepoInstancesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListRepoInstancesDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListRepoPools lists repository pools
+*/
+func (a *Client) ListRepoPools(params *ListRepoPoolsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoPoolsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListRepoPoolsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListRepoPools",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/pools",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListRepoPoolsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListRepoPoolsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListRepoPoolsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListRepoScaleSets lists repository scale sets
+*/
+func (a *Client) ListRepoScaleSets(params *ListRepoScaleSetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListRepoScaleSetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListRepoScaleSetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListRepoScaleSets",
+ Method: "GET",
+ PathPattern: "/repositories/{repoID}/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListRepoScaleSetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListRepoScaleSetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListRepoScaleSetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListRepos lists repositories
+*/
+func (a *Client) ListRepos(params *ListReposParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListReposOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListReposParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListRepos",
+ Method: "GET",
+ PathPattern: "/repositories",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListReposReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListReposOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListReposDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UninstallRepoWebhook uninstalls a repository webhook
+*/
+func (a *Client) UninstallRepoWebhook(params *UninstallRepoWebhookParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUninstallRepoWebhookParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UninstallRepoWebhook",
+ Method: "DELETE",
+ PathPattern: "/repositories/{repoID}/webhook",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UninstallRepoWebhookReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+UpdateRepo updates repository with the parameters given
+*/
+func (a *Client) UpdateRepo(params *UpdateRepoParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateRepoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateRepo",
+ Method: "PUT",
+ PathPattern: "/repositories/{repoID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateRepoReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateRepoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateRepoDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateRepoPool updates repository pool with the parameters given
+*/
+func (a *Client) UpdateRepoPool(params *UpdateRepoPoolParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateRepoPoolOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateRepoPoolParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateRepoPool",
+ Method: "PUT",
+ PathPattern: "/repositories/{repoID}/pools/{poolID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateRepoPoolReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateRepoPoolOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateRepoPoolDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client; subsequent requests use the new transport
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+	a.transport = transport
+}
diff --git a/client/repositories/uninstall_repo_webhook_parameters.go b/client/repositories/uninstall_repo_webhook_parameters.go
new file mode 100644
index 00000000..acefd615
--- /dev/null
+++ b/client/repositories/uninstall_repo_webhook_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewUninstallRepoWebhookParams creates a new UninstallRepoWebhookParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUninstallRepoWebhookParams() *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithTimeout creates a new UninstallRepoWebhookParams object
+// with the ability to set a timeout on a request.
+func NewUninstallRepoWebhookParamsWithTimeout(timeout time.Duration) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ timeout: timeout,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithContext creates a new UninstallRepoWebhookParams object
+// with the ability to set a context for a request.
+func NewUninstallRepoWebhookParamsWithContext(ctx context.Context) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ Context: ctx,
+ }
+}
+
+// NewUninstallRepoWebhookParamsWithHTTPClient creates a new UninstallRepoWebhookParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUninstallRepoWebhookParamsWithHTTPClient(client *http.Client) *UninstallRepoWebhookParams {
+ return &UninstallRepoWebhookParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UninstallRepoWebhookParams contains all the parameters to send to the API endpoint
+
+ for the uninstall repo webhook operation.
+
+ Typically these are written to a http.Request.
+*/
+type UninstallRepoWebhookParams struct {
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the uninstall repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallRepoWebhookParams) WithDefaults() *UninstallRepoWebhookParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the uninstall repo webhook params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UninstallRepoWebhookParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithTimeout(timeout time.Duration) *UninstallRepoWebhookParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithContext(ctx context.Context) *UninstallRepoWebhookParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithHTTPClient(client *http.Client) *UninstallRepoWebhookParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithRepoID adds the repoID to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) WithRepoID(repoID string) *UninstallRepoWebhookParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the uninstall repo webhook params
+func (o *UninstallRepoWebhookParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UninstallRepoWebhookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/uninstall_repo_webhook_responses.go b/client/repositories/uninstall_repo_webhook_responses.go
new file mode 100644
index 00000000..54a66cf1
--- /dev/null
+++ b/client/repositories/uninstall_repo_webhook_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// UninstallRepoWebhookReader is a Reader for the UninstallRepoWebhook structure.
+type UninstallRepoWebhookReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UninstallRepoWebhookReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewUninstallRepoWebhookDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewUninstallRepoWebhookDefault creates a UninstallRepoWebhookDefault with default headers values
+func NewUninstallRepoWebhookDefault(code int) *UninstallRepoWebhookDefault {
+ return &UninstallRepoWebhookDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UninstallRepoWebhookDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UninstallRepoWebhookDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this uninstall repo webhook default response has a 2xx status code
+func (o *UninstallRepoWebhookDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this uninstall repo webhook default response has a 3xx status code
+func (o *UninstallRepoWebhookDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this uninstall repo webhook default response has a 4xx status code
+func (o *UninstallRepoWebhookDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this uninstall repo webhook default response has a 5xx status code
+func (o *UninstallRepoWebhookDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this uninstall repo webhook default response a status code equal to that given
+func (o *UninstallRepoWebhookDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the uninstall repo webhook default response
+func (o *UninstallRepoWebhookDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UninstallRepoWebhookDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/webhook][%d] UninstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallRepoWebhookDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /repositories/{repoID}/webhook][%d] UninstallRepoWebhook default %s", o._statusCode, payload)
+}
+
+func (o *UninstallRepoWebhookDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UninstallRepoWebhookDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/update_repo_parameters.go b/client/repositories/update_repo_parameters.go
new file mode 100644
index 00000000..58297812
--- /dev/null
+++ b/client/repositories/update_repo_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateRepoParams creates a new UpdateRepoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateRepoParams() *UpdateRepoParams {
+ return &UpdateRepoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateRepoParamsWithTimeout creates a new UpdateRepoParams object
+// with the ability to set a timeout on a request.
+func NewUpdateRepoParamsWithTimeout(timeout time.Duration) *UpdateRepoParams {
+ return &UpdateRepoParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateRepoParamsWithContext creates a new UpdateRepoParams object
+// with the ability to set a context for a request.
+func NewUpdateRepoParamsWithContext(ctx context.Context) *UpdateRepoParams {
+ return &UpdateRepoParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateRepoParamsWithHTTPClient creates a new UpdateRepoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateRepoParamsWithHTTPClient(client *http.Client) *UpdateRepoParams {
+ return &UpdateRepoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateRepoParams contains all the parameters to send to the API endpoint
+
+ for the update repo operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateRepoParams struct {
+
+ /* Body.
+
+ Parameters used when updating the repository.
+ */
+ Body garm_params.UpdateEntityParams
+
+ /* RepoID.
+
+ ID of the repository to update.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateRepoParams) WithDefaults() *UpdateRepoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update repo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateRepoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update repo params
+func (o *UpdateRepoParams) WithTimeout(timeout time.Duration) *UpdateRepoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update repo params
+func (o *UpdateRepoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update repo params
+func (o *UpdateRepoParams) WithContext(ctx context.Context) *UpdateRepoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update repo params
+func (o *UpdateRepoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update repo params
+func (o *UpdateRepoParams) WithHTTPClient(client *http.Client) *UpdateRepoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update repo params
+func (o *UpdateRepoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update repo params
+func (o *UpdateRepoParams) WithBody(body garm_params.UpdateEntityParams) *UpdateRepoParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update repo params
+func (o *UpdateRepoParams) SetBody(body garm_params.UpdateEntityParams) {
+ o.Body = body
+}
+
+// WithRepoID adds the repoID to the update repo params
+func (o *UpdateRepoParams) WithRepoID(repoID string) *UpdateRepoParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the update repo params
+func (o *UpdateRepoParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateRepoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/update_repo_pool_parameters.go b/client/repositories/update_repo_pool_parameters.go
new file mode 100644
index 00000000..e96b6b94
--- /dev/null
+++ b/client/repositories/update_repo_pool_parameters.go
@@ -0,0 +1,195 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateRepoPoolParams creates a new UpdateRepoPoolParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateRepoPoolParams() *UpdateRepoPoolParams {
+ return &UpdateRepoPoolParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateRepoPoolParamsWithTimeout creates a new UpdateRepoPoolParams object
+// with the ability to set a timeout on a request.
+func NewUpdateRepoPoolParamsWithTimeout(timeout time.Duration) *UpdateRepoPoolParams {
+ return &UpdateRepoPoolParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateRepoPoolParamsWithContext creates a new UpdateRepoPoolParams object
+// with the ability to set a context for a request.
+func NewUpdateRepoPoolParamsWithContext(ctx context.Context) *UpdateRepoPoolParams {
+ return &UpdateRepoPoolParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateRepoPoolParamsWithHTTPClient creates a new UpdateRepoPoolParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateRepoPoolParamsWithHTTPClient(client *http.Client) *UpdateRepoPoolParams {
+ return &UpdateRepoPoolParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateRepoPoolParams contains all the parameters to send to the API endpoint
+
+ for the update repo pool operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateRepoPoolParams struct {
+
+ /* Body.
+
+ Parameters used when updating the repository pool.
+ */
+ Body garm_params.UpdatePoolParams
+
+ /* PoolID.
+
+ ID of the repository pool to update.
+ */
+ PoolID string
+
+ /* RepoID.
+
+ Repository ID.
+ */
+ RepoID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateRepoPoolParams) WithDefaults() *UpdateRepoPoolParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update repo pool params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateRepoPoolParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update repo pool params
+func (o *UpdateRepoPoolParams) WithTimeout(timeout time.Duration) *UpdateRepoPoolParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update repo pool params
+func (o *UpdateRepoPoolParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update repo pool params
+func (o *UpdateRepoPoolParams) WithContext(ctx context.Context) *UpdateRepoPoolParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update repo pool params
+func (o *UpdateRepoPoolParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update repo pool params
+func (o *UpdateRepoPoolParams) WithHTTPClient(client *http.Client) *UpdateRepoPoolParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update repo pool params
+func (o *UpdateRepoPoolParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update repo pool params
+func (o *UpdateRepoPoolParams) WithBody(body garm_params.UpdatePoolParams) *UpdateRepoPoolParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update repo pool params
+func (o *UpdateRepoPoolParams) SetBody(body garm_params.UpdatePoolParams) {
+ o.Body = body
+}
+
+// WithPoolID adds the poolID to the update repo pool params
+func (o *UpdateRepoPoolParams) WithPoolID(poolID string) *UpdateRepoPoolParams {
+ o.SetPoolID(poolID)
+ return o
+}
+
+// SetPoolID adds the poolId to the update repo pool params
+func (o *UpdateRepoPoolParams) SetPoolID(poolID string) {
+ o.PoolID = poolID
+}
+
+// WithRepoID adds the repoID to the update repo pool params
+func (o *UpdateRepoPoolParams) WithRepoID(repoID string) *UpdateRepoPoolParams {
+ o.SetRepoID(repoID)
+ return o
+}
+
+// SetRepoID adds the repoId to the update repo pool params
+func (o *UpdateRepoPoolParams) SetRepoID(repoID string) {
+ o.RepoID = repoID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateRepoPoolParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param poolID
+ if err := r.SetPathParam("poolID", o.PoolID); err != nil {
+ return err
+ }
+
+ // path param repoID
+ if err := r.SetPathParam("repoID", o.RepoID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/repositories/update_repo_pool_responses.go b/client/repositories/update_repo_pool_responses.go
new file mode 100644
index 00000000..8d5da3f7
--- /dev/null
+++ b/client/repositories/update_repo_pool_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateRepoPoolReader is a Reader for the UpdateRepoPool structure.
+type UpdateRepoPoolReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateRepoPoolReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateRepoPoolOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateRepoPoolDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateRepoPoolOK creates a UpdateRepoPoolOK with default headers values
+func NewUpdateRepoPoolOK() *UpdateRepoPoolOK {
+ return &UpdateRepoPoolOK{}
+}
+
+/*
+UpdateRepoPoolOK describes a response with status code 200, with default header values.
+
+Pool
+*/
+type UpdateRepoPoolOK struct {
+ Payload garm_params.Pool
+}
+
+// IsSuccess returns true when this update repo pool o k response has a 2xx status code
+func (o *UpdateRepoPoolOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update repo pool o k response has a 3xx status code
+func (o *UpdateRepoPoolOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update repo pool o k response has a 4xx status code
+func (o *UpdateRepoPoolOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update repo pool o k response has a 5xx status code
+func (o *UpdateRepoPoolOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update repo pool o k response a status code equal to that given
+func (o *UpdateRepoPoolOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update repo pool o k response
+func (o *UpdateRepoPoolOK) Code() int {
+ return 200
+}
+
+func (o *UpdateRepoPoolOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %s", 200, payload)
+}
+
+func (o *UpdateRepoPoolOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] updateRepoPoolOK %s", 200, payload)
+}
+
+func (o *UpdateRepoPoolOK) GetPayload() garm_params.Pool {
+ return o.Payload
+}
+
+func (o *UpdateRepoPoolOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateRepoPoolDefault creates a UpdateRepoPoolDefault with default headers values
+func NewUpdateRepoPoolDefault(code int) *UpdateRepoPoolDefault {
+ return &UpdateRepoPoolDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateRepoPoolDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateRepoPoolDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update repo pool default response has a 2xx status code
+func (o *UpdateRepoPoolDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update repo pool default response has a 3xx status code
+func (o *UpdateRepoPoolDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update repo pool default response has a 4xx status code
+func (o *UpdateRepoPoolDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update repo pool default response has a 5xx status code
+func (o *UpdateRepoPoolDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update repo pool default response a status code equal to that given
+func (o *UpdateRepoPoolDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update repo pool default response
+func (o *UpdateRepoPoolDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateRepoPoolDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateRepoPoolDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}/pools/{poolID}][%d] UpdateRepoPool default %s", o._statusCode, payload)
+}
+
+func (o *UpdateRepoPoolDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateRepoPoolDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/repositories/update_repo_responses.go b/client/repositories/update_repo_responses.go
new file mode 100644
index 00000000..117d6bb9
--- /dev/null
+++ b/client/repositories/update_repo_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package repositories
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateRepoReader is a Reader for the UpdateRepo structure.
+type UpdateRepoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateRepoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateRepoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateRepoDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateRepoOK creates a UpdateRepoOK with default headers values
+func NewUpdateRepoOK() *UpdateRepoOK {
+ return &UpdateRepoOK{}
+}
+
+/*
+UpdateRepoOK describes a response with status code 200, with default header values.
+
+Repository
+*/
+type UpdateRepoOK struct {
+ Payload garm_params.Repository
+}
+
+// IsSuccess returns true when this update repo o k response has a 2xx status code
+func (o *UpdateRepoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update repo o k response has a 3xx status code
+func (o *UpdateRepoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update repo o k response has a 4xx status code
+func (o *UpdateRepoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update repo o k response has a 5xx status code
+func (o *UpdateRepoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update repo o k response a status code equal to that given
+func (o *UpdateRepoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update repo o k response
+func (o *UpdateRepoOK) Code() int {
+ return 200
+}
+
+func (o *UpdateRepoOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %s", 200, payload)
+}
+
+func (o *UpdateRepoOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] updateRepoOK %s", 200, payload)
+}
+
+func (o *UpdateRepoOK) GetPayload() garm_params.Repository {
+ return o.Payload
+}
+
+func (o *UpdateRepoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateRepoDefault creates a UpdateRepoDefault with default headers values
+func NewUpdateRepoDefault(code int) *UpdateRepoDefault {
+ return &UpdateRepoDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateRepoDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateRepoDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update repo default response has a 2xx status code
+func (o *UpdateRepoDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update repo default response has a 3xx status code
+func (o *UpdateRepoDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update repo default response has a 4xx status code
+func (o *UpdateRepoDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update repo default response has a 5xx status code
+func (o *UpdateRepoDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update repo default response a status code equal to that given
+func (o *UpdateRepoDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update repo default response
+func (o *UpdateRepoDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateRepoDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %s", o._statusCode, payload)
+}
+
+func (o *UpdateRepoDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /repositories/{repoID}][%d] UpdateRepo default %s", o._statusCode, payload)
+}
+
+func (o *UpdateRepoDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateRepoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/delete_scale_set_parameters.go b/client/scalesets/delete_scale_set_parameters.go
new file mode 100644
index 00000000..640f95a8
--- /dev/null
+++ b/client/scalesets/delete_scale_set_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteScaleSetParams creates a new DeleteScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteScaleSetParams() *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteScaleSetParamsWithTimeout creates a new DeleteScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewDeleteScaleSetParamsWithTimeout(timeout time.Duration) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteScaleSetParamsWithContext creates a new DeleteScaleSetParams object
+// with the ability to set a context for a request.
+func NewDeleteScaleSetParamsWithContext(ctx context.Context) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteScaleSetParamsWithHTTPClient creates a new DeleteScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteScaleSetParamsWithHTTPClient(client *http.Client) *DeleteScaleSetParams {
+ return &DeleteScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the delete scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteScaleSetParams struct {
+
+ /* ScalesetID.
+
+ ID of the scale set to delete.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteScaleSetParams) WithDefaults() *DeleteScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete scale set params
+func (o *DeleteScaleSetParams) WithTimeout(timeout time.Duration) *DeleteScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete scale set params
+func (o *DeleteScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete scale set params
+func (o *DeleteScaleSetParams) WithContext(ctx context.Context) *DeleteScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete scale set params
+func (o *DeleteScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete scale set params
+func (o *DeleteScaleSetParams) WithHTTPClient(client *http.Client) *DeleteScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete scale set params
+func (o *DeleteScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the delete scale set params
+func (o *DeleteScaleSetParams) WithScalesetID(scalesetID string) *DeleteScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the delete scale set params
+func (o *DeleteScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/scalesets/delete_scale_set_responses.go b/client/scalesets/delete_scale_set_responses.go
new file mode 100644
index 00000000..dd0f7334
--- /dev/null
+++ b/client/scalesets/delete_scale_set_responses.go
@@ -0,0 +1,106 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+)
+
+// DeleteScaleSetReader is a Reader for the DeleteScaleSet structure.
+type DeleteScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ result := NewDeleteScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+}
+
+// NewDeleteScaleSetDefault creates a DeleteScaleSetDefault with default headers values
+func NewDeleteScaleSetDefault(code int) *DeleteScaleSetDefault {
+ return &DeleteScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+DeleteScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type DeleteScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this delete scale set default response has a 2xx status code
+func (o *DeleteScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this delete scale set default response has a 3xx status code
+func (o *DeleteScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this delete scale set default response has a 4xx status code
+func (o *DeleteScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this delete scale set default response has a 5xx status code
+func (o *DeleteScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this delete scale set default response a status code equal to that given
+func (o *DeleteScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the delete scale set default response
+func (o *DeleteScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *DeleteScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /scalesets/{scalesetID}][%d] DeleteScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *DeleteScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[DELETE /scalesets/{scalesetID}][%d] DeleteScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *DeleteScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *DeleteScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/get_scale_set_parameters.go b/client/scalesets/get_scale_set_parameters.go
new file mode 100644
index 00000000..9e31b46e
--- /dev/null
+++ b/client/scalesets/get_scale_set_parameters.go
@@ -0,0 +1,151 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetScaleSetParams creates a new GetScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetScaleSetParams() *GetScaleSetParams {
+ return &GetScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetScaleSetParamsWithTimeout creates a new GetScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewGetScaleSetParamsWithTimeout(timeout time.Duration) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetScaleSetParamsWithContext creates a new GetScaleSetParams object
+// with the ability to set a context for a request.
+func NewGetScaleSetParamsWithContext(ctx context.Context) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewGetScaleSetParamsWithHTTPClient creates a new GetScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetScaleSetParamsWithHTTPClient(client *http.Client) *GetScaleSetParams {
+ return &GetScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the get scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetScaleSetParams struct {
+
+ /* ScalesetID.
+
+ ID of the scale set to fetch.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetScaleSetParams) WithDefaults() *GetScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get scale set params
+func (o *GetScaleSetParams) WithTimeout(timeout time.Duration) *GetScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get scale set params
+func (o *GetScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get scale set params
+func (o *GetScaleSetParams) WithContext(ctx context.Context) *GetScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get scale set params
+func (o *GetScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get scale set params
+func (o *GetScaleSetParams) WithHTTPClient(client *http.Client) *GetScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get scale set params
+func (o *GetScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithScalesetID adds the scalesetID to the get scale set params
+func (o *GetScaleSetParams) WithScalesetID(scalesetID string) *GetScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the get scale set params
+func (o *GetScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/scalesets/get_scale_set_responses.go b/client/scalesets/get_scale_set_responses.go
new file mode 100644
index 00000000..5b30e16f
--- /dev/null
+++ b/client/scalesets/get_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// GetScaleSetReader is a Reader for the GetScaleSet structure.
+type GetScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewGetScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewGetScaleSetOK creates a GetScaleSetOK with default headers values
+func NewGetScaleSetOK() *GetScaleSetOK {
+ return &GetScaleSetOK{}
+}
+
+/*
+GetScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type GetScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this get scale set o k response has a 2xx status code
+func (o *GetScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get scale set o k response has a 3xx status code
+func (o *GetScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get scale set o k response has a 4xx status code
+func (o *GetScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get scale set o k response has a 5xx status code
+func (o *GetScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get scale set o k response a status code equal to that given
+func (o *GetScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the get scale set o k response
+func (o *GetScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *GetScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] getScaleSetOK %s", 200, payload)
+}
+
+func (o *GetScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] getScaleSetOK %s", 200, payload)
+}
+
+func (o *GetScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *GetScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetScaleSetDefault creates a GetScaleSetDefault with default headers values
+func NewGetScaleSetDefault(code int) *GetScaleSetDefault {
+ return &GetScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+GetScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type GetScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this get scale set default response has a 2xx status code
+func (o *GetScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this get scale set default response has a 3xx status code
+func (o *GetScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this get scale set default response has a 4xx status code
+func (o *GetScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this get scale set default response has a 5xx status code
+func (o *GetScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this get scale set default response a status code equal to that given
+func (o *GetScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the get scale set default response
+func (o *GetScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *GetScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] GetScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *GetScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets/{scalesetID}][%d] GetScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *GetScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *GetScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/list_scalesets_parameters.go b/client/scalesets/list_scalesets_parameters.go
new file mode 100644
index 00000000..b6fd1ccb
--- /dev/null
+++ b/client/scalesets/list_scalesets_parameters.go
@@ -0,0 +1,128 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewListScalesetsParams creates a new ListScalesetsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListScalesetsParams() *ListScalesetsParams {
+ return &ListScalesetsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewListScalesetsParamsWithTimeout creates a new ListScalesetsParams object
+// with the ability to set a timeout on a request.
+func NewListScalesetsParamsWithTimeout(timeout time.Duration) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ timeout: timeout,
+ }
+}
+
+// NewListScalesetsParamsWithContext creates a new ListScalesetsParams object
+// with the ability to set a context for a request.
+func NewListScalesetsParamsWithContext(ctx context.Context) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ Context: ctx,
+ }
+}
+
+// NewListScalesetsParamsWithHTTPClient creates a new ListScalesetsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListScalesetsParamsWithHTTPClient(client *http.Client) *ListScalesetsParams {
+ return &ListScalesetsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+ListScalesetsParams contains all the parameters to send to the API endpoint
+
+ for the list scalesets operation.
+
+ Typically these are written to a http.Request.
+*/
+type ListScalesetsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list scalesets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScalesetsParams) WithDefaults() *ListScalesetsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the list scalesets params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListScalesetsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list scalesets params
+func (o *ListScalesetsParams) WithTimeout(timeout time.Duration) *ListScalesetsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the list scalesets params
+func (o *ListScalesetsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the list scalesets params
+func (o *ListScalesetsParams) WithContext(ctx context.Context) *ListScalesetsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the list scalesets params
+func (o *ListScalesetsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list scalesets params
+func (o *ListScalesetsParams) WithHTTPClient(client *http.Client) *ListScalesetsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list scalesets params
+func (o *ListScalesetsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListScalesetsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/scalesets/list_scalesets_responses.go b/client/scalesets/list_scalesets_responses.go
new file mode 100644
index 00000000..05064308
--- /dev/null
+++ b/client/scalesets/list_scalesets_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// ListScalesetsReader is a Reader for the ListScalesets structure.
+type ListScalesetsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListScalesetsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewListScalesetsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewListScalesetsDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewListScalesetsOK creates a ListScalesetsOK with default headers values
+func NewListScalesetsOK() *ListScalesetsOK {
+ return &ListScalesetsOK{}
+}
+
+/*
+ListScalesetsOK describes a response with status code 200, with default header values.
+
+ScaleSets
+*/
+type ListScalesetsOK struct {
+ Payload garm_params.ScaleSets
+}
+
+// IsSuccess returns true when this list scalesets o k response has a 2xx status code
+func (o *ListScalesetsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this list scalesets o k response has a 3xx status code
+func (o *ListScalesetsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this list scalesets o k response has a 4xx status code
+func (o *ListScalesetsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this list scalesets o k response has a 5xx status code
+func (o *ListScalesetsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this list scalesets o k response a status code equal to that given
+func (o *ListScalesetsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the list scalesets o k response
+func (o *ListScalesetsOK) Code() int {
+ return 200
+}
+
+func (o *ListScalesetsOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] listScalesetsOK %s", 200, payload)
+}
+
+func (o *ListScalesetsOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] listScalesetsOK %s", 200, payload)
+}
+
+func (o *ListScalesetsOK) GetPayload() garm_params.ScaleSets {
+ return o.Payload
+}
+
+func (o *ListScalesetsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewListScalesetsDefault creates a ListScalesetsDefault with default headers values
+func NewListScalesetsDefault(code int) *ListScalesetsDefault {
+ return &ListScalesetsDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+ListScalesetsDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type ListScalesetsDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this list scalesets default response has a 2xx status code
+func (o *ListScalesetsDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list scalesets default response has a 3xx status code
+func (o *ListScalesetsDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list scalesets default response has a 4xx status code
+func (o *ListScalesetsDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list scalesets default response has a 5xx status code
+func (o *ListScalesetsDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list scalesets default response a status code equal to that given
+func (o *ListScalesetsDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the list scalesets default response
+func (o *ListScalesetsDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *ListScalesetsDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] ListScalesets default %s", o._statusCode, payload)
+}
+
+func (o *ListScalesetsDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[GET /scalesets][%d] ListScalesets default %s", o._statusCode, payload)
+}
+
+func (o *ListScalesetsDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *ListScalesetsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/client/scalesets/scalesets_client.go b/client/scalesets/scalesets_client.go
new file mode 100644
index 00000000..5375750d
--- /dev/null
+++ b/client/scalesets/scalesets_client.go
@@ -0,0 +1,217 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new scalesets API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+// NewClientWithBasicAuth creates a new scalesets API client with basic auth credentials.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - user: user for basic authentication header.
+// - password: password for basic authentication header.
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+// NewClientWithBearerToken creates a new scalesets API client with a bearer token for authentication.
+// It takes the following parameters:
+// - host: http host (github.com).
+// - basePath: any base path for the API client ("/v1", "/v3").
+// - scheme: http scheme ("http", "https").
+// - bearerToken: bearer token for Bearer authentication header.
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
+ transport := httptransport.New(host, basePath, []string{scheme})
+ transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
+ return &Client{transport: transport, formats: strfmt.Default}
+}
+
+/*
+Client for scalesets API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption may be used to customize the behavior of Client methods.
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteScaleSet(params *DeleteScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error
+
+ GetScaleSet(params *GetScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetScaleSetOK, error)
+
+ ListScalesets(params *ListScalesetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScalesetsOK, error)
+
+ UpdateScaleSet(params *UpdateScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateScaleSetOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteScaleSet deletes scale set by ID
+*/
+func (a *Client) DeleteScaleSet(params *DeleteScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) error {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteScaleSet",
+ Method: "DELETE",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ _, err := a.transport.Submit(op)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+/*
+GetScaleSet gets scale set by ID
+*/
+func (a *Client) GetScaleSet(params *GetScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetScaleSet",
+ Method: "GET",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*GetScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ListScalesets lists all scalesets
+*/
+func (a *Client) ListScalesets(params *ListScalesetsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListScalesetsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewListScalesetsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "ListScalesets",
+ Method: "GET",
+ PathPattern: "/scalesets",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &ListScalesetsReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*ListScalesetsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*ListScalesetsDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+UpdateScaleSet updates scale set by ID
+*/
+func (a *Client) UpdateScaleSet(params *UpdateScaleSetParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*UpdateScaleSetOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewUpdateScaleSetParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "UpdateScaleSet",
+ Method: "PUT",
+ PathPattern: "/scalesets/{scalesetID}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &UpdateScaleSetReader{formats: a.formats},
+ AuthInfo: authInfo,
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*UpdateScaleSetOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ unexpectedSuccess := result.(*UpdateScaleSetDefault)
+ return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/client/scalesets/update_scale_set_parameters.go b/client/scalesets/update_scale_set_parameters.go
new file mode 100644
index 00000000..39668e9b
--- /dev/null
+++ b/client/scalesets/update_scale_set_parameters.go
@@ -0,0 +1,173 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// NewUpdateScaleSetParams creates a new UpdateScaleSetParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewUpdateScaleSetParams() *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewUpdateScaleSetParamsWithTimeout creates a new UpdateScaleSetParams object
+// with the ability to set a timeout on a request.
+func NewUpdateScaleSetParamsWithTimeout(timeout time.Duration) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ timeout: timeout,
+ }
+}
+
+// NewUpdateScaleSetParamsWithContext creates a new UpdateScaleSetParams object
+// with the ability to set a context for a request.
+func NewUpdateScaleSetParamsWithContext(ctx context.Context) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ Context: ctx,
+ }
+}
+
+// NewUpdateScaleSetParamsWithHTTPClient creates a new UpdateScaleSetParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewUpdateScaleSetParamsWithHTTPClient(client *http.Client) *UpdateScaleSetParams {
+ return &UpdateScaleSetParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+UpdateScaleSetParams contains all the parameters to send to the API endpoint
+
+ for the update scale set operation.
+
+ Typically these are written to a http.Request.
+*/
+type UpdateScaleSetParams struct {
+
+ /* Body.
+
+ Parameters to update the scale set with.
+ */
+ Body garm_params.UpdateScaleSetParams
+
+ /* ScalesetID.
+
+ ID of the scale set to update.
+ */
+ ScalesetID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the update scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateScaleSetParams) WithDefaults() *UpdateScaleSetParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the update scale set params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *UpdateScaleSetParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the update scale set params
+func (o *UpdateScaleSetParams) WithTimeout(timeout time.Duration) *UpdateScaleSetParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the update scale set params
+func (o *UpdateScaleSetParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the update scale set params
+func (o *UpdateScaleSetParams) WithContext(ctx context.Context) *UpdateScaleSetParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the update scale set params
+func (o *UpdateScaleSetParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the update scale set params
+func (o *UpdateScaleSetParams) WithHTTPClient(client *http.Client) *UpdateScaleSetParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the update scale set params
+func (o *UpdateScaleSetParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBody adds the body to the update scale set params
+func (o *UpdateScaleSetParams) WithBody(body garm_params.UpdateScaleSetParams) *UpdateScaleSetParams {
+ o.SetBody(body)
+ return o
+}
+
+// SetBody adds the body to the update scale set params
+func (o *UpdateScaleSetParams) SetBody(body garm_params.UpdateScaleSetParams) {
+ o.Body = body
+}
+
+// WithScalesetID adds the scalesetID to the update scale set params
+func (o *UpdateScaleSetParams) WithScalesetID(scalesetID string) *UpdateScaleSetParams {
+ o.SetScalesetID(scalesetID)
+ return o
+}
+
+// SetScalesetID adds the scalesetId to the update scale set params
+func (o *UpdateScaleSetParams) SetScalesetID(scalesetID string) {
+ o.ScalesetID = scalesetID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *UpdateScaleSetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Body); err != nil {
+ return err
+ }
+
+ // path param scalesetID
+ if err := r.SetPathParam("scalesetID", o.ScalesetID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/client/scalesets/update_scale_set_responses.go b/client/scalesets/update_scale_set_responses.go
new file mode 100644
index 00000000..666e8256
--- /dev/null
+++ b/client/scalesets/update_scale_set_responses.go
@@ -0,0 +1,184 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package scalesets
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ apiserver_params "github.com/cloudbase/garm/apiserver/params"
+ garm_params "github.com/cloudbase/garm/params"
+)
+
+// UpdateScaleSetReader is a Reader for the UpdateScaleSet structure.
+type UpdateScaleSetReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *UpdateScaleSetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewUpdateScaleSetOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ result := NewUpdateScaleSetDefault(response.Code())
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ if response.Code()/100 == 2 {
+ return result, nil
+ }
+ return nil, result
+ }
+}
+
+// NewUpdateScaleSetOK creates a UpdateScaleSetOK with default headers values
+func NewUpdateScaleSetOK() *UpdateScaleSetOK {
+ return &UpdateScaleSetOK{}
+}
+
+/*
+UpdateScaleSetOK describes a response with status code 200, with default header values.
+
+ScaleSet
+*/
+type UpdateScaleSetOK struct {
+ Payload garm_params.ScaleSet
+}
+
+// IsSuccess returns true when this update scale set o k response has a 2xx status code
+func (o *UpdateScaleSetOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this update scale set o k response has a 3xx status code
+func (o *UpdateScaleSetOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this update scale set o k response has a 4xx status code
+func (o *UpdateScaleSetOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this update scale set o k response has a 5xx status code
+func (o *UpdateScaleSetOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this update scale set o k response a status code equal to that given
+func (o *UpdateScaleSetOK) IsCode(code int) bool {
+ return code == 200
+}
+
+// Code gets the status code for the update scale set o k response
+func (o *UpdateScaleSetOK) Code() int {
+ return 200
+}
+
+func (o *UpdateScaleSetOK) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] updateScaleSetOK %s", 200, payload)
+}
+
+func (o *UpdateScaleSetOK) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] updateScaleSetOK %s", 200, payload)
+}
+
+func (o *UpdateScaleSetOK) GetPayload() garm_params.ScaleSet {
+ return o.Payload
+}
+
+func (o *UpdateScaleSetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewUpdateScaleSetDefault creates a UpdateScaleSetDefault with default headers values
+func NewUpdateScaleSetDefault(code int) *UpdateScaleSetDefault {
+ return &UpdateScaleSetDefault{
+ _statusCode: code,
+ }
+}
+
+/*
+UpdateScaleSetDefault describes a response with status code -1, with default header values.
+
+APIErrorResponse
+*/
+type UpdateScaleSetDefault struct {
+ _statusCode int
+
+ Payload apiserver_params.APIErrorResponse
+}
+
+// IsSuccess returns true when this update scale set default response has a 2xx status code
+func (o *UpdateScaleSetDefault) IsSuccess() bool {
+ return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this update scale set default response has a 3xx status code
+func (o *UpdateScaleSetDefault) IsRedirect() bool {
+ return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this update scale set default response has a 4xx status code
+func (o *UpdateScaleSetDefault) IsClientError() bool {
+ return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this update scale set default response has a 5xx status code
+func (o *UpdateScaleSetDefault) IsServerError() bool {
+ return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this update scale set default response a status code equal to that given
+func (o *UpdateScaleSetDefault) IsCode(code int) bool {
+ return o._statusCode == code
+}
+
+// Code gets the status code for the update scale set default response
+func (o *UpdateScaleSetDefault) Code() int {
+ return o._statusCode
+}
+
+func (o *UpdateScaleSetDefault) Error() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] UpdateScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *UpdateScaleSetDefault) String() string {
+ payload, _ := json.Marshal(o.Payload)
+ return fmt.Sprintf("[PUT /scalesets/{scalesetID}][%d] UpdateScaleSet default %s", o._statusCode, payload)
+}
+
+func (o *UpdateScaleSetDefault) GetPayload() apiserver_params.APIErrorResponse {
+ return o.Payload
+}
+
+func (o *UpdateScaleSetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/cloudconfig/cloudconfig.go b/cloudconfig/cloudconfig.go
deleted file mode 100644
index 04296302..00000000
--- a/cloudconfig/cloudconfig.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cloudconfig
-
-import (
- "crypto/x509"
- "encoding/base64"
- "fmt"
- "strings"
- "sync"
-
- "github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
- "gopkg.in/yaml.v3"
-)
-
-func NewDefaultCloudInitConfig() *CloudInit {
- return &CloudInit{
- PackageUpgrade: true,
- Packages: []string{
- "curl",
- "tar",
- },
- SystemInfo: &SystemInfo{
- DefaultUser: DefaultUser{
- Name: appdefaults.DefaultUser,
- Home: fmt.Sprintf("/home/%s", appdefaults.DefaultUser),
- Shell: appdefaults.DefaultUserShell,
- Groups: appdefaults.DefaultUserGroups,
- Sudo: "ALL=(ALL) NOPASSWD:ALL",
- },
- },
- }
-}
-
-type DefaultUser struct {
- Name string `yaml:"name"`
- Home string `yaml:"home"`
- Shell string `yaml:"shell"`
- Groups []string `yaml:"groups,omitempty"`
- Sudo string `yaml:"sudo"`
-}
-
-type SystemInfo struct {
- DefaultUser DefaultUser `yaml:"default_user"`
-}
-
-type File struct {
- Encoding string `yaml:"encoding"`
- Content string `yaml:"content"`
- Owner string `yaml:"owner"`
- Path string `yaml:"path"`
- Permissions string `yaml:"permissions"`
-}
-
-type CloudInit struct {
- mux sync.Mutex
-
- PackageUpgrade bool `yaml:"package_upgrade"`
- Packages []string `yaml:"packages,omitempty"`
- SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys,omitempty"`
- SystemInfo *SystemInfo `yaml:"system_info,omitempty"`
- RunCmd []string `yaml:"runcmd,omitempty"`
- WriteFiles []File `yaml:"write_files,omitempty"`
- CACerts CACerts `yaml:"ca-certs,omitempty"`
-}
-
-type CACerts struct {
- RemoveDefaults bool `yaml:"remove-defaults"`
- Trusted []string `yaml:"trusted"`
-}
-
-func (c *CloudInit) AddCACert(cert []byte) error {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- if cert == nil {
- return nil
- }
-
- roots := x509.NewCertPool()
- if ok := roots.AppendCertsFromPEM(cert); !ok {
- return fmt.Errorf("failed to parse CA cert bundle")
- }
- c.CACerts.Trusted = append(c.CACerts.Trusted, string(cert))
-
- return nil
-}
-
-func (c *CloudInit) AddSSHKey(keys ...string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- // TODO(gabriel-samfira): Validate the SSH public key.
- for _, key := range keys {
- found := false
- for _, val := range c.SSHAuthorizedKeys {
- if val == key {
- found = true
- break
- }
- }
- if !found {
- c.SSHAuthorizedKeys = append(c.SSHAuthorizedKeys, key)
- }
- }
-}
-
-func (c *CloudInit) AddPackage(pkgs ...string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- for _, pkg := range pkgs {
- found := false
- for _, val := range c.Packages {
- if val == pkg {
- found = true
- break
- }
- }
- if !found {
- c.Packages = append(c.Packages, pkg)
- }
- }
-}
-
-func (c *CloudInit) AddRunCmd(cmd string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- c.RunCmd = append(c.RunCmd, cmd)
-}
-
-func (c *CloudInit) AddFile(contents []byte, path, owner, permissions string) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- for _, val := range c.WriteFiles {
- if val.Path == path {
- return
- }
- }
-
- file := File{
- Encoding: "b64",
- Content: base64.StdEncoding.EncodeToString(contents),
- Owner: owner,
- Permissions: permissions,
- Path: path,
- }
- c.WriteFiles = append(c.WriteFiles, file)
-}
-
-func (c *CloudInit) Serialize() (string, error) {
- c.mux.Lock()
- defer c.mux.Unlock()
-
- ret := []string{
- "#cloud-config",
- }
-
- asYaml, err := yaml.Marshal(c)
- if err != nil {
- return "", errors.Wrap(err, "marshaling to yaml")
- }
-
- ret = append(ret, string(asYaml))
- return strings.Join(ret, "\n"), nil
-}
diff --git a/cloudconfig/templates.go b/cloudconfig/templates.go
deleted file mode 100644
index e7c7e7db..00000000
--- a/cloudconfig/templates.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cloudconfig
-
-import (
- "bytes"
- "fmt"
- "text/template"
-
- "github.com/cloudbase/garm/params"
- "github.com/pkg/errors"
-)
-
-var CloudConfigTemplate = `#!/bin/bash
-
-set -e
-set -o pipefail
-
-CALLBACK_URL="{{ .CallbackURL }}"
-METADATA_URL="{{ .MetadataURL }}"
-BEARER_TOKEN="{{ .CallbackToken }}"
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-GITHUB_TOKEN=$(curl --retry 5 --retry-max-time 5 --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}/runner-registration-token/")
-
-function call() {
- PAYLOAD="$1"
- curl --retry 5 --retry-max-time 5 --retry-all-errors --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-# This will echo the version number in the filename. Given a file name like: actions-runner-osx-x64-2.299.1.tar.gz
-# this will output: 2.299.1
-function getRunnerVersion() {
- FILENAME="{{ .FileName }}"
- [[ $FILENAME =~ ([0-9]+\.[0-9]+\.[0-9+]) ]]
- echo $BASH_REMATCH
-}
-
-function getCachedToolsPath() {
- CACHED_RUNNER="/opt/cache/actions-runner/latest"
- if [ -d "$CACHED_RUNNER" ];then
- echo "$CACHED_RUNNER"
- return 0
- fi
-
- VERSION=$(getRunnerVersion)
- if [ -z "$VERSION" ]; then
- return 0
- fi
-
- CACHED_RUNNER="/opt/cache/actions-runner/$VERSION"
- if [ -d "$CACHED_RUNNER" ];then
- echo "$CACHED_RUNNER"
- return 0
- fi
- return 0
-}
-
-function downloadAndExtractRunner() {
- sendStatus "downloading tools from {{ .DownloadURL }}"
- if [ ! -z "{{ .TempDownloadToken }}" ]; then
- TEMP_TOKEN="Authorization: Bearer {{ .TempDownloadToken }}"
- fi
- curl --retry 5 --retry-max-time 5 --retry-all-errors --fail -L -H "${TEMP_TOKEN}" -o "/home/{{ .RunnerUsername }}/{{ .FileName }}" "{{ .DownloadURL }}" || fail "failed to download tools"
- mkdir -p /home/runner/actions-runner || fail "failed to create actions-runner folder"
- sendStatus "extracting runner"
- tar xf "/home/{{ .RunnerUsername }}/{{ .FileName }}" -C /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to extract runner"
- chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R /home/{{ .RunnerUsername }}/actions-runner/ || fail "failed to change owner"
-}
-
-TEMP_TOKEN=""
-GH_RUNNER_GROUP="{{.GitHubRunnerGroup}}"
-
-# $RUNNER_GROUP_OPT will be added to the config.sh line. If it's empty, nothing happens
-# if it holds a value, it will be part of the command.
-RUNNER_GROUP_OPT=""
-if [ ! -z $GH_RUNNER_GROUP ];then
- RUNNER_GROUP_OPT="--runnergroup=$GH_RUNNER_GROUP"
-fi
-
-CACHED_RUNNER=$(getCachedToolsPath)
-if [ -z "$CACHED_RUNNER" ];then
- downloadAndExtractRunner
- sendStatus "installing dependencies"
- cd /home/{{ .RunnerUsername }}/actions-runner
- sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-else
- sendStatus "using cached runner found in $CACHED_RUNNER"
- sudo cp -a "$CACHED_RUNNER" "/home/{{ .RunnerUsername }}/actions-runner"
- cd /home/{{ .RunnerUsername }}/actions-runner
- chown {{ .RunnerUsername }}:{{ .RunnerGroup }} -R "/home/{{ .RunnerUsername }}/actions-runner" || fail "failed to change owner"
-fi
-
-
-sendStatus "configuring runner"
-set +e
-attempt=1
-while true; do
- ERROUT=$(mktemp)
- sudo -u {{ .RunnerUsername }} -- ./config.sh --unattended --url "{{ .RepoURL }}" --token "$GITHUB_TOKEN" $RUNNER_GROUP_OPT --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral 2>$ERROUT
- if [ $? -eq 0 ]; then
- rm $ERROUT || true
- sendStatus "runner successfully configured after $attempt attempt(s)"
- break
- fi
- LAST_ERR=$(cat $ERROUT)
- echo "$LAST_ERR"
-
- # if the runner is already configured, remove it and try again. In the past configuring a runner
- # managed to register it but timed out later, resulting in an error.
- sudo -u {{ .RunnerUsername }} -- ./config.sh remove --token "$GITHUB_TOKEN" || true
-
- if [ $attempt -gt 5 ];then
- rm $ERROUT || true
- fail "failed to configure runner: $LAST_ERR"
- fi
-
- sendStatus "failed to configure runner (attempt $attempt): $LAST_ERR (retrying in 5 seconds)"
- attempt=$((attempt+1))
- rm $ERROUT || true
- sleep 5
-done
-set -e
-
-sendStatus "installing runner service"
-./svc.sh install {{ .RunnerUsername }} || fail "failed to install service"
-
-if [ -e "/sys/fs/selinux" ];then
- sudo chcon -h user_u:object_r:bin_t /home/runner/ || fail "failed to change selinux context"
- sudo chcon -R -h {{ .RunnerUsername }}:object_r:bin_t /home/runner/* || fail "failed to change selinux context"
-fi
-
-sendStatus "starting service"
-./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/{{ .RunnerUsername }}/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
-`
-
-var WindowsSetupScriptTemplate = `#ps1_sysnative
-Param(
- [Parameter(Mandatory=$false)]
- [string]$Token="{{.CallbackToken}}"
-)
-
-$ErrorActionPreference="Stop"
-
-function Invoke-FastWebRequest {
- [CmdletBinding()]
- Param(
- [Parameter(Mandatory=$True,ValueFromPipeline=$true,Position=0)]
- [System.Uri]$Uri,
- [Parameter(Position=1)]
- [string]$OutFile,
- [Hashtable]$Headers=@{},
- [switch]$SkipIntegrityCheck=$false
- )
- PROCESS
- {
- if(!([System.Management.Automation.PSTypeName]'System.Net.Http.HttpClient').Type)
- {
- $assembly = [System.Reflection.Assembly]::LoadWithPartialName("System.Net.Http")
- }
-
- if(!$OutFile) {
- $OutFile = $Uri.PathAndQuery.Substring($Uri.PathAndQuery.LastIndexOf("/") + 1)
- if(!$OutFile) {
- throw "The ""OutFile"" parameter needs to be specified"
- }
- }
-
- $fragment = $Uri.Fragment.Trim('#')
- if ($fragment) {
- $details = $fragment.Split("=")
- $algorithm = $details[0]
- $hash = $details[1]
- }
-
- if (!$SkipIntegrityCheck -and $fragment -and (Test-Path $OutFile)) {
- try {
- return (Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash)
- } catch {
- Remove-Item $OutFile
- }
- }
-
- $client = new-object System.Net.Http.HttpClient
- foreach ($k in $Headers.Keys){
- $client.DefaultRequestHeaders.Add($k, $Headers[$k])
- }
- $task = $client.GetStreamAsync($Uri)
- $response = $task.Result
- if($task.IsFaulted) {
- $msg = "Request for URL '{0}' is faulted. Task status: {1}." -f @($Uri, $task.Status)
- if($task.Exception) {
- $msg += "Exception details: {0}" -f @($task.Exception)
- }
- Throw $msg
- }
- $outStream = New-Object IO.FileStream $OutFile, Create, Write, None
-
- try {
- $totRead = 0
- $buffer = New-Object Byte[] 1MB
- while (($read = $response.Read($buffer, 0, $buffer.Length)) -gt 0) {
- $totRead += $read
- $outStream.Write($buffer, 0, $read);
- }
- }
- finally {
- $outStream.Close()
- }
- if(!$SkipIntegrityCheck -and $fragment) {
- Test-FileIntegrity -File $OutFile -Algorithm $algorithm -ExpectedHash $hash
- }
- }
-}
-
-function Import-Certificate() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$CertificatePath,
- [parameter(Mandatory=$true)]
- [System.Security.Cryptography.X509Certificates.StoreLocation]$StoreLocation="LocalMachine",
- [parameter(Mandatory=$true)]
- [System.Security.Cryptography.X509Certificates.StoreName]$StoreName="TrustedPublisher"
- )
- PROCESS
- {
- $store = New-Object System.Security.Cryptography.X509Certificates.X509Store(
- $StoreName, $StoreLocation)
- $store.Open([System.Security.Cryptography.X509Certificates.OpenFlags]::ReadWrite)
- $cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2(
- $CertificatePath)
- $store.Add($cert)
- }
-}
-
-function Invoke-APICall() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [object]$Payload,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- Invoke-WebRequest -UseBasicParsing -Method Post -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $CallbackURL -Body (ConvertTo-Json $Payload) | Out-Null
- }
-}
-
-function Update-GarmStatus() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="installing"
- "message"=$Message
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- }
-}
-
-function Invoke-GarmSuccess() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [int64]$AgentID,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="idle"
- "message"=$Message
- "agent_id"=$AgentID
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- }
-}
-
-function Invoke-GarmFailure() {
- [CmdletBinding()]
- param (
- [parameter(Mandatory=$true)]
- [string]$Message,
- [parameter(Mandatory=$true)]
- [string]$CallbackURL
- )
- PROCESS{
- $body = @{
- "status"="failed"
- "message"=$Message
- }
- Invoke-APICall -Payload $body -CallbackURL $CallbackURL | Out-Null
- Throw $Message
- }
-}
-
-$PEMData = @"
-{{.CABundle}}
-"@
-$GHRunnerGroup = "{{.GitHubRunnerGroup}}"
-
-function Install-Runner() {
- $CallbackURL="{{.CallbackURL}}"
- if ($Token.Length -eq 0) {
- Throw "missing callback authentication token"
- }
- try {
- $MetadataURL="{{.MetadataURL}}"
- $DownloadURL="{{.DownloadURL}}"
- if($MetadataURL -eq ""){
- Throw "missing metadata URL"
- }
-
- if($PEMData.Trim().Length -gt 0){
- Set-Content $env:TMP\garm-ca.pem $PEMData
- Import-Certificate -CertificatePath $env:TMP\garm-ca.pem
- }
-
- $GithubRegistrationToken = Invoke-WebRequest -UseBasicParsing -Headers @{"Accept"="application/json"; "Authorization"="Bearer $Token"} -Uri $MetadataURL/runner-registration-token/
- Update-GarmStatus -CallbackURL $CallbackURL -Message "downloading tools from $DownloadURL"
-
- $downloadToken="{{.TempDownloadToken}}"
- $DownloadTokenHeaders=@{}
- if ($downloadToken.Length -gt 0) {
- $DownloadTokenHeaders=@{
- "Authorization"="Bearer $downloadToken"
- }
- }
- $downloadPath = Join-Path $env:TMP {{.FileName}}
- Invoke-FastWebRequest -Uri $DownloadURL -OutFile $downloadPath -Headers $DownloadTokenHeaders
-
- $runnerDir = "C:\runner"
- mkdir $runnerDir
-
- Update-GarmStatus -CallbackURL $CallbackURL -Message "extracting runner"
- Add-Type -AssemblyName System.IO.Compression.FileSystem
- [System.IO.Compression.ZipFile]::ExtractToDirectory($downloadPath, "$runnerDir")
- $runnerGroupOpt = ""
- if ($GHRunnerGroup.Length -gt 0){
- $runnerGroupOpt = "--runnergroup $GHRunnerGroup"
- }
- Update-GarmStatus -CallbackURL $CallbackURL -Message "configuring and starting runner"
- cd $runnerDir
- ./config.cmd --unattended --url "{{ .RepoURL }}" --token $GithubRegistrationToken $runnerGroupOpt --name "{{ .RunnerName }}" --labels "{{ .RunnerLabels }}" --ephemeral --runasservice
-
- $agentInfoFile = Join-Path $runnerDir ".runner"
- $agentInfo = ConvertFrom-Json (gc -raw $agentInfoFile)
- Invoke-GarmSuccess -CallbackURL $CallbackURL -Message "runner successfully installed" -AgentID $agentInfo.agentId
- } catch {
- Invoke-GarmFailure -CallbackURL $CallbackURL -Message $_
- }
-}
-Install-Runner
-`
-
-type InstallRunnerParams struct {
- FileName string
- DownloadURL string
- RunnerUsername string
- RunnerGroup string
- RepoURL string
- MetadataURL string
- RunnerName string
- RunnerLabels string
- CallbackURL string
- CallbackToken string
- TempDownloadToken string
- CABundle string
- GitHubRunnerGroup string
-}
-
-func InstallRunnerScript(installParams InstallRunnerParams, osType params.OSType) ([]byte, error) {
- var tpl string
- switch osType {
- case params.Linux:
- tpl = CloudConfigTemplate
- case params.Windows:
- tpl = WindowsSetupScriptTemplate
- default:
- return nil, fmt.Errorf("unsupported os type: %s", osType)
- }
-
- t, err := template.New("").Parse(tpl)
- if err != nil {
- return nil, errors.Wrap(err, "parsing template")
- }
-
- var buf bytes.Buffer
- if err := t.Execute(&buf, installParams); err != nil {
- return nil, errors.Wrap(err, "rendering template")
- }
-
- return buf.Bytes(), nil
-}
diff --git a/cmd/garm-cli/client/client.go b/cmd/garm-cli/client/client.go
deleted file mode 100644
index 7058e277..00000000
--- a/cmd/garm-cli/client/client.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package client
-
-import (
- "encoding/json"
- "fmt"
-
- apiParams "github.com/cloudbase/garm/apiserver/params"
- "github.com/cloudbase/garm/cmd/garm-cli/config"
- "github.com/cloudbase/garm/params"
-
- "github.com/go-resty/resty/v2"
- "github.com/pkg/errors"
-)
-
-func NewClient(name string, cfg config.Manager, debug bool) *Client {
- cli := resty.New()
- if cfg.Token != "" {
- cli = cli.SetAuthToken(cfg.Token)
- }
- cli = cli.
- SetHeader("Accept", "application/json").
- SetDebug(debug)
- return &Client{
- ManagerName: name,
- Config: cfg,
- client: cli,
- }
-}
-
-type Client struct {
- ManagerName string
- Config config.Manager
- client *resty.Client
-}
-
-func (c *Client) handleError(err error, resp *resty.Response) error {
- var ret error
- if err != nil {
- ret = fmt.Errorf("request returned error: %s", err)
- }
-
- if resp != nil && resp.IsError() {
- body := resp.Body()
- if len(body) > 0 {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr == nil {
- ret = fmt.Errorf("API returned error: %s", apiErr.Details)
- }
- }
- }
- return ret
-}
-
-func (c *Client) decodeAPIError(body []byte) (apiParams.APIErrorResponse, error) {
- var errDetails apiParams.APIErrorResponse
- if err := json.Unmarshal(body, &errDetails); err != nil {
- return apiParams.APIErrorResponse{}, fmt.Errorf("invalid response from server, use --debug for more info")
- }
-
- return errDetails, fmt.Errorf("error in API call: %s", errDetails.Details)
-}
-
-func (c *Client) InitManager(url string, param params.NewUserParams) (params.User, error) {
- body, err := json.Marshal(param)
- if err != nil {
- return params.User{}, errors.Wrap(err, "marshaling body")
- }
- url = fmt.Sprintf("%s/api/v1/first-run/", url)
-
- var response params.User
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return params.User{}, errors.Wrap(decErr, "sending request")
- }
- return params.User{}, fmt.Errorf("error running init: %s", apiErr.Details)
- }
-
- return response, nil
-}
-
-func (c *Client) Login(url string, param params.PasswordLoginParams) (string, error) {
- body, err := json.Marshal(param)
- if err != nil {
- return "", errors.Wrap(err, "marshaling body")
- }
- url = fmt.Sprintf("%s/api/v1/auth/login", url)
-
- var response params.JWTResponse
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return "", errors.Wrap(decErr, "sending request")
- }
- return "", fmt.Errorf("error performing login: %s", apiErr.Details)
- }
-
- return response.Token, nil
-}
-
-func (c *Client) ListCredentials() ([]params.GithubCredentials, error) {
- var ghCreds []params.GithubCredentials
- url := fmt.Sprintf("%s/api/v1/credentials", c.Config.BaseURL)
- resp, err := c.client.R().
- SetResult(&ghCreds).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return nil, errors.Wrap(decErr, "sending request")
- }
- return nil, fmt.Errorf("error fetching credentials: %s", apiErr.Details)
- }
- return ghCreds, nil
-}
-
-func (c *Client) ListProviders() ([]params.Provider, error) {
- var providers []params.Provider
- url := fmt.Sprintf("%s/api/v1/providers", c.Config.BaseURL)
- resp, err := c.client.R().
- SetResult(&providers).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return nil, errors.Wrap(decErr, "sending request")
- }
- return nil, fmt.Errorf("error fetching providers: %s", apiErr.Details)
- }
- return providers, nil
-}
-
-func (c *Client) GetInstanceByName(instanceName string) (params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/instances/%s", c.Config.BaseURL, instanceName)
-
- var response params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
-
-func (c *Client) DeleteRunner(instanceName string) error {
- url := fmt.Sprintf("%s/api/v1/instances/%s", c.Config.BaseURL, instanceName)
- resp, err := c.client.R().
- Delete(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return errors.Wrap(decErr, "sending request")
- }
- return fmt.Errorf("error deleting runner: %s", apiErr.Details)
- }
- return nil
-}
-
-func (c *Client) ListPoolInstances(poolID string) ([]params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/pools/%s/instances", c.Config.BaseURL, poolID)
-
- var response []params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
-
-func (c *Client) ListAllInstances() ([]params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/instances", c.Config.BaseURL)
-
- var response []params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
-
-func (c *Client) GetPoolByID(poolID string) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/pools/%s", c.Config.BaseURL, poolID)
-
- var response params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
-
-func (c *Client) ListAllPools() ([]params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/pools", c.Config.BaseURL)
-
- var response []params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
-
-func (c *Client) DeletePoolByID(poolID string) error {
- url := fmt.Sprintf("%s/api/v1/pools/%s", c.Config.BaseURL, poolID)
- resp, err := c.client.R().
- Delete(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return errors.Wrap(decErr, "sending request")
- }
- return fmt.Errorf("error deleting pool by ID: %s", apiErr.Details)
- }
- return nil
-}
-
-func (c *Client) UpdatePoolByID(poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/pools/%s", c.Config.BaseURL, poolID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Put(url)
- if err != nil || resp.IsError() {
- apiErr, decErr := c.decodeAPIError(resp.Body())
- if decErr != nil {
- return response, errors.Wrap(decErr, "sending request")
- }
- return response, fmt.Errorf("error performing login: %s", apiErr.Details)
- }
- return response, nil
-}
diff --git a/cmd/garm-cli/client/enterprises.go b/cmd/garm-cli/client/enterprises.go
deleted file mode 100644
index c3700a2c..00000000
--- a/cmd/garm-cli/client/enterprises.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package client
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/cloudbase/garm/params"
-)
-
-func (c *Client) ListEnterprises() ([]params.Enterprise, error) {
- var enterprises []params.Enterprise
- url := fmt.Sprintf("%s/api/v1/enterprises", c.Config.BaseURL)
- resp, err := c.client.R().
- SetResult(&enterprises).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return enterprises, nil
-}
-
-func (c *Client) CreateEnterprise(param params.CreateEnterpriseParams) (params.Enterprise, error) {
- var response params.Enterprise
- url := fmt.Sprintf("%s/api/v1/enterprises", c.Config.BaseURL)
-
- body, err := json.Marshal(param)
- if err != nil {
- return params.Enterprise{}, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Enterprise{}, err
- }
- return response, nil
-}
-
-func (c *Client) GetEnterprise(enterpriseID string) (params.Enterprise, error) {
- var response params.Enterprise
- url := fmt.Sprintf("%s/api/v1/enterprises/%s", c.Config.BaseURL, enterpriseID)
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Enterprise{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteEnterprise(enterpriseID string) error {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s", c.Config.BaseURL, enterpriseID)
- resp, err := c.client.R().
- Delete(url)
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) CreateEnterprisePool(enterpriseID string, param params.CreatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/pools", c.Config.BaseURL, enterpriseID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListEnterprisePools(enterpriseID string) ([]params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/pools", c.Config.BaseURL, enterpriseID)
-
- var response []params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
-
-func (c *Client) GetEnterprisePool(enterpriseID, poolID string) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/pools/%s", c.Config.BaseURL, enterpriseID, poolID)
-
- var response params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteEnterprisePool(enterpriseID, poolID string) error {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/pools/%s", c.Config.BaseURL, enterpriseID, poolID)
-
- resp, err := c.client.R().
- Delete(url)
-
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) UpdateEnterprisePool(enterpriseID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/pools/%s", c.Config.BaseURL, enterpriseID, poolID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Put(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListEnterpriseInstances(enterpriseID string) ([]params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/enterprises/%s/instances", c.Config.BaseURL, enterpriseID)
-
- var response []params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
diff --git a/cmd/garm-cli/client/organizations.go b/cmd/garm-cli/client/organizations.go
deleted file mode 100644
index deff04c9..00000000
--- a/cmd/garm-cli/client/organizations.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package client
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/cloudbase/garm/params"
-)
-
-func (c *Client) ListOrganizations() ([]params.Organization, error) {
- var orgs []params.Organization
- url := fmt.Sprintf("%s/api/v1/organizations", c.Config.BaseURL)
- resp, err := c.client.R().
- SetResult(&orgs).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return orgs, nil
-}
-
-func (c *Client) CreateOrganization(param params.CreateOrgParams) (params.Organization, error) {
- var response params.Organization
- url := fmt.Sprintf("%s/api/v1/organizations", c.Config.BaseURL)
-
- body, err := json.Marshal(param)
- if err != nil {
- return params.Organization{}, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Organization{}, err
- }
- return response, nil
-}
-
-func (c *Client) GetOrganization(orgID string) (params.Organization, error) {
- var response params.Organization
- url := fmt.Sprintf("%s/api/v1/organizations/%s", c.Config.BaseURL, orgID)
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Organization{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteOrganization(orgID string) error {
- url := fmt.Sprintf("%s/api/v1/organizations/%s", c.Config.BaseURL, orgID)
- resp, err := c.client.R().
- Delete(url)
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) CreateOrgPool(orgID string, param params.CreatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/pools", c.Config.BaseURL, orgID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListOrgPools(orgID string) ([]params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/pools", c.Config.BaseURL, orgID)
-
- var response []params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
-
-func (c *Client) GetOrgPool(orgID, poolID string) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/pools/%s", c.Config.BaseURL, orgID, poolID)
-
- var response params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteOrgPool(orgID, poolID string) error {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/pools/%s", c.Config.BaseURL, orgID, poolID)
-
- resp, err := c.client.R().
- Delete(url)
-
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) UpdateOrgPool(orgID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/pools/%s", c.Config.BaseURL, orgID, poolID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Put(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListOrgInstances(orgID string) ([]params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/organizations/%s/instances", c.Config.BaseURL, orgID)
-
- var response []params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
-
-func (c *Client) CreateMetricsToken() (string, error) {
- url := fmt.Sprintf("%s/api/v1/metrics-token", c.Config.BaseURL)
-
- type response struct {
- Token string `json:"token"`
- }
-
- var t response
- resp, err := c.client.R().
- SetResult(&t).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return "", err
- }
- return t.Token, nil
-}
diff --git a/cmd/garm-cli/client/repositories.go b/cmd/garm-cli/client/repositories.go
deleted file mode 100644
index 5759c218..00000000
--- a/cmd/garm-cli/client/repositories.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package client
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/cloudbase/garm/params"
-)
-
-func (c *Client) ListRepositories() ([]params.Repository, error) {
- var repos []params.Repository
- url := fmt.Sprintf("%s/api/v1/repositories", c.Config.BaseURL)
- resp, err := c.client.R().
- SetResult(&repos).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return repos, nil
-}
-
-func (c *Client) CreateRepository(param params.CreateRepoParams) (params.Repository, error) {
- var response params.Repository
- url := fmt.Sprintf("%s/api/v1/repositories", c.Config.BaseURL)
-
- body, err := json.Marshal(param)
- if err != nil {
- return params.Repository{}, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Repository{}, err
- }
- return response, nil
-}
-
-func (c *Client) GetRepository(repoID string) (params.Repository, error) {
- var response params.Repository
- url := fmt.Sprintf("%s/api/v1/repositories/%s", c.Config.BaseURL, repoID)
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Repository{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteRepository(repoID string) error {
- url := fmt.Sprintf("%s/api/v1/repositories/%s", c.Config.BaseURL, repoID)
- resp, err := c.client.R().
- Delete(url)
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) CreateRepoPool(repoID string, param params.CreatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/pools", c.Config.BaseURL, repoID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Post(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListRepoPools(repoID string) ([]params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/pools", c.Config.BaseURL, repoID)
-
- var response []params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
-
-func (c *Client) GetRepoPool(repoID, poolID string) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/pools/%s", c.Config.BaseURL, repoID, poolID)
-
- var response params.Pool
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) DeleteRepoPool(repoID, poolID string) error {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/pools/%s", c.Config.BaseURL, repoID, poolID)
-
- resp, err := c.client.R().
- Delete(url)
-
- if err := c.handleError(err, resp); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) UpdateRepoPool(repoID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/pools/%s", c.Config.BaseURL, repoID, poolID)
-
- var response params.Pool
- body, err := json.Marshal(param)
- if err != nil {
- return response, err
- }
- resp, err := c.client.R().
- SetBody(body).
- SetResult(&response).
- Put(url)
- if err := c.handleError(err, resp); err != nil {
- return params.Pool{}, err
- }
- return response, nil
-}
-
-func (c *Client) ListRepoInstances(repoID string) ([]params.Instance, error) {
- url := fmt.Sprintf("%s/api/v1/repositories/%s/instances", c.Config.BaseURL, repoID)
-
- var response []params.Instance
- resp, err := c.client.R().
- SetResult(&response).
- Get(url)
- if err := c.handleError(err, resp); err != nil {
- return nil, err
- }
- return response, nil
-}
diff --git a/cmd/garm-cli/cmd/controller.go b/cmd/garm-cli/cmd/controller.go
new file mode 100644
index 00000000..c1326943
--- /dev/null
+++ b/cmd/garm-cli/cmd/controller.go
@@ -0,0 +1,188 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientController "github.com/cloudbase/garm/client/controller"
+ apiClientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var controllerCmd = &cobra.Command{
+ Use: "controller",
+ Aliases: []string{"controller-info"},
+ SilenceUsage: true,
+ Short: "Controller operations",
+ Long: `Query or update information about the current controller.`,
+ Run: nil,
+}
+
+var controllerShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show information",
+ Long: `Show information about the current controller.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ showInfo := apiClientControllerInfo.NewControllerInfoParams()
+ response, err := apiCli.ControllerInfo.ControllerInfo(showInfo, authToken)
+ if err != nil {
+ return err
+ }
+ return formatInfo(response.Payload)
+ },
+}
+
+var controllerUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update controller information",
+ Long: `Update information about the current controller.
+
+Warning: Dragons ahead, please read carefully.
+
+Changing the URLs for the controller metadata, callback and webhooks, will
+impact the controller's ability to manage webhooks and runners.
+
+As GARM can be set up behind a reverse proxy or through several layers of
+network address translation or load balancing, we need to explicitly tell
+GARM how to reach each of these URLs. Internally, GARM sets up API endpoints
+as follows:
+
+ * /webhooks - the base URL for the webhooks. Github needs to reach this URL.
+ * /api/v1/metadata - the metadata URL. Your runners need to be able to reach this URL.
+ * /api/v1/callbacks - the callback URL. Your runners need to be able to reach this URL.
+
+You need to expose these endpoints to the interested parties (github or
+your runners), then you need to update the controller with the URLs you set up.
+
+For example, if you set the webhooks URL in your reverse proxy to
+https://garm.example.com/garm-hooks, this still needs to point to the "/webhooks"
+URL in the GARM backend, but in the controller info you need to set the URL to
+https://garm.example.com/garm-hooks using:
+
+ garm-cli controller update --webhook-url=https://garm.example.com/garm-hooks
+
+If you expose GARM to the outside world directly, or if you don't rewrite the URLs
+above in your reverse proxy config, use the above 3 endpoints without change,
+substituting garm.example.com with the correct hostname or IP address.
+
+In most cases, you will have a GARM backend (say 192.168.100.10) and a reverse
+proxy in front of it exposed as https://garm.example.com. If you don't rewrite
+the URLs in the reverse proxy, and you just point to your backend, you can set
+up the GARM controller URLs as:
+
+ garm-cli controller update \
+ --webhook-url=https://garm.example.com/webhooks \
+ --metadata-url=https://garm.example.com/api/v1/metadata \
+ --callback-url=https://garm.example.com/api/v1/callbacks
+`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ params := params.UpdateControllerParams{}
+ if cmd.Flags().Changed("metadata-url") {
+ params.MetadataURL = &metadataURL
+ }
+ if cmd.Flags().Changed("callback-url") {
+ params.CallbackURL = &callbackURL
+ }
+ if cmd.Flags().Changed("webhook-url") {
+ params.WebhookURL = &webhookURL
+ }
+
+ if cmd.Flags().Changed("minimum-job-age-backoff") {
+ params.MinimumJobAgeBackoff = &minimumJobAgeBackoff
+ }
+
+ if params.WebhookURL == nil && params.MetadataURL == nil && params.CallbackURL == nil && params.MinimumJobAgeBackoff == nil {
+ cmd.Help()
+ return fmt.Errorf("at least one of minimum-job-age-backoff, metadata-url, callback-url or webhook-url must be provided")
+ }
+
+ updateUrlsReq := apiClientController.NewUpdateControllerParams()
+ updateUrlsReq.Body = params
+
+ info, err := apiCli.Controller.UpdateController(updateUrlsReq, authToken)
+ if err != nil {
+ return fmt.Errorf("error updating controller: %w", err)
+ }
+ formatInfo(info.Payload)
+ return nil
+ },
+}
+
+func renderControllerInfoTable(info params.ControllerInfo) string {
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+
+ if info.WebhookURL == "" {
+ info.WebhookURL = "N/A"
+ }
+
+ if info.ControllerWebhookURL == "" {
+ info.ControllerWebhookURL = "N/A"
+ }
+ serverVersion := "v0.0.0-unknown"
+ if info.Version != "" {
+ serverVersion = info.Version
+ }
+ t.AppendHeader(header)
+ t.AppendRow(table.Row{"Controller ID", info.ControllerID})
+ if info.Hostname != "" {
+ t.AppendRow(table.Row{"Hostname", info.Hostname})
+ }
+ t.AppendRow(table.Row{"Metadata URL", info.MetadataURL})
+ t.AppendRow(table.Row{"Callback URL", info.CallbackURL})
+ t.AppendRow(table.Row{"Webhook Base URL", info.WebhookURL})
+ t.AppendRow(table.Row{"Controller Webhook URL", info.ControllerWebhookURL})
+ t.AppendRow(table.Row{"Minimum Job Age Backoff", info.MinimumJobAgeBackoff})
+ t.AppendRow(table.Row{"Version", serverVersion})
+ return t.Render()
+}
+
+func formatInfo(info params.ControllerInfo) error {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(info)
+ return nil
+ }
+ fmt.Println(renderControllerInfoTable(info))
+ return nil
+}
+
+func init() {
+ controllerUpdateCmd.Flags().StringVarP(&metadataURL, "metadata-url", "m", "", "The metadata URL for the controller (ie. https://garm.example.com/api/v1/metadata)")
+ controllerUpdateCmd.Flags().StringVarP(&callbackURL, "callback-url", "c", "", "The callback URL for the controller (ie. https://garm.example.com/api/v1/callbacks)")
+ controllerUpdateCmd.Flags().StringVarP(&webhookURL, "webhook-url", "w", "", "The webhook URL for the controller (ie. https://garm.example.com/webhooks)")
+ controllerUpdateCmd.Flags().UintVarP(&minimumJobAgeBackoff, "minimum-job-age-backoff", "b", 0, "The minimum job age backoff for the controller")
+
+ controllerCmd.AddCommand(
+ controllerShowCmd,
+ controllerUpdateCmd,
+ )
+
+ rootCmd.AddCommand(controllerCmd)
+}
diff --git a/cmd/garm-cli/cmd/credentials.go b/cmd/garm-cli/cmd/credentials.go
deleted file mode 100644
index 6559eeb6..00000000
--- a/cmd/garm-cli/cmd/credentials.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package cmd
-
-import (
- "fmt"
-
- "github.com/cloudbase/garm/params"
-
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/spf13/cobra"
-)
-
-// credentialsCmd represents the credentials command
-var credentialsCmd = &cobra.Command{
- Use: "credentials",
- Aliases: []string{"creds"},
- Short: "List configured credentials",
- Long: `List all available credentials configured in the service
-config file.
-
-Currently, github personal tokens are configured statically in the config file
-of the garm service. This command lists the names of those credentials,
-which in turn can be used to define pools of runners withing repositories.`,
- Run: nil,
-}
-
-func init() {
- credentialsCmd.AddCommand(
- &cobra.Command{
- Use: "list",
- Aliases: []string{"ls"},
- Short: "List configured github credentials",
- Long: `List the names of the github personal access tokens availabe to the garm.`,
- SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- if needsInit {
- return errNeedsInitError
- }
-
- creds, err := cli.ListCredentials()
- if err != nil {
- return err
- }
- formatGithubCredentials(creds)
- return nil
- },
- })
-
- rootCmd.AddCommand(credentialsCmd)
-}
-
-func formatGithubCredentials(creds []params.GithubCredentials) {
- t := table.NewWriter()
- header := table.Row{"Name", "Description", "Base URL", "API URL", "Upload URL"}
- t.AppendHeader(header)
- for _, val := range creds {
- t.AppendRow(table.Row{val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL})
- t.AppendSeparator()
- }
- fmt.Println(t.Render())
-}
diff --git a/cmd/garm-cli/cmd/enterprise.go b/cmd/garm-cli/cmd/enterprise.go
index 0641099e..5c937b81 100644
--- a/cmd/garm-cli/cmd/enterprise.go
+++ b/cmd/garm-cli/cmd/enterprise.go
@@ -16,15 +16,19 @@ package cmd
import (
"fmt"
-
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
enterpriseName string
+ enterpriseEndpoint string
enterpriseWebhookSecret string
enterpriseCreds string
)
@@ -49,21 +53,23 @@ var enterpriseAddCmd = &cobra.Command{
Short: "Add enterprise",
Long: `Add a new enterprise to the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- newEnterpriseReq := params.CreateEnterpriseParams{
- Name: enterpriseName,
- WebhookSecret: enterpriseWebhookSecret,
- CredentialsName: enterpriseCreds,
+ newEnterpriseReq := apiClientEnterprises.NewCreateEnterpriseParams()
+ newEnterpriseReq.Body = params.CreateEnterpriseParams{
+ Name: enterpriseName,
+ WebhookSecret: enterpriseWebhookSecret,
+ CredentialsName: enterpriseCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
}
- enterprise, err := cli.CreateEnterprise(newEnterpriseReq)
+ response, err := apiCli.Enterprises.CreateEnterprise(newEnterpriseReq, authToken)
if err != nil {
return err
}
- formatOneEnterprise(enterprise)
+ formatOneEnterprise(response.Payload)
return nil
},
}
@@ -74,16 +80,19 @@ var enterpriseListCmd = &cobra.Command{
Short: "List enterprises",
Long: `List all configured enterprises that are currently managed.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- enterprises, err := cli.ListEnterprises()
+ listEnterprisesReq := apiClientEnterprises.NewListEnterprisesParams()
+ listEnterprisesReq.Name = &enterpriseName
+ listEnterprisesReq.Endpoint = &enterpriseEndpoint
+ response, err := apiCli.Enterprises.ListEnterprises(listEnterprisesReq, authToken)
if err != nil {
return err
}
- formatEnterprises(enterprises)
+ formatEnterprises(response.Payload)
return nil
},
}
@@ -93,7 +102,7 @@ var enterpriseShowCmd = &cobra.Command{
Short: "Show details for one enterprise",
Long: `Displays detailed information about a single enterprise.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -103,11 +112,19 @@ var enterpriseShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- enterprise, err := cli.GetEnterprise(args[0])
+
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
if err != nil {
return err
}
- formatOneEnterprise(enterprise)
+
+ showEnterpriseReq := apiClientEnterprises.NewGetEnterpriseParams()
+ showEnterpriseReq.EnterpriseID = enterpriseID
+ response, err := apiCli.Enterprises.GetEnterprise(showEnterpriseReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEnterprise(response.Payload)
return nil
},
}
@@ -118,7 +135,7 @@ var enterpriseDeleteCmd = &cobra.Command{
Short: "Removes one enterprise",
Long: `Delete one enterprise from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -128,50 +145,128 @@ var enterpriseDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- if err := cli.DeleteEnterprise(args[0]); err != nil {
+
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
+ if err != nil {
+ return err
+ }
+
+ deleteEnterpriseReq := apiClientEnterprises.NewDeleteEnterpriseParams()
+ deleteEnterpriseReq.EnterpriseID = enterpriseID
+ if err := apiCli.Enterprises.DeleteEnterprise(deleteEnterpriseReq, authToken); err != nil {
return err
}
return nil
},
}
-func init() {
+var enterpriseUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update enterprise",
+ Long: `Update enterprise credentials or webhook secret.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("command requires a enterprise ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+ enterpriseID, err := resolveEnterprise(args[0], enterpriseEndpoint)
+ if err != nil {
+ return err
+ }
+
+ updateEnterpriseReq := apiClientEnterprises.NewUpdateEnterpriseParams()
+ updateEnterpriseReq.Body = params.UpdateEntityParams{
+ WebhookSecret: repoWebhookSecret,
+ CredentialsName: repoCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
+ }
+ updateEnterpriseReq.EnterpriseID = enterpriseID
+ response, err := apiCli.Enterprises.UpdateEnterprise(updateEnterpriseReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEnterprise(response.Payload)
+ return nil
+ },
+}
+
+func init() {
enterpriseAddCmd.Flags().StringVar(&enterpriseName, "name", "", "The name of the enterprise")
enterpriseAddCmd.Flags().StringVar(&enterpriseWebhookSecret, "webhook-secret", "", "The webhook secret for this enterprise")
enterpriseAddCmd.Flags().StringVar(&enterpriseCreds, "credentials", "", "Credentials name. See credentials list.")
+ enterpriseAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
+
+ enterpriseListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ enterpriseListCmd.Flags().StringVarP(&enterpriseName, "name", "n", "", "Exact enterprise name to filter by.")
+ enterpriseListCmd.Flags().StringVarP(&enterpriseEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
enterpriseAddCmd.MarkFlagRequired("credentials") //nolint
enterpriseAddCmd.MarkFlagRequired("name") //nolint
+ enterpriseUpdateCmd.Flags().StringVar(&enterpriseWebhookSecret, "webhook-secret", "", "The webhook secret for this enterprise")
+ enterpriseUpdateCmd.Flags().StringVar(&enterpriseCreds, "credentials", "", "Credentials name. See credentials list.")
+ enterpriseUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ enterpriseUpdateCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
+
+ enterpriseDeleteCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
+ enterpriseShowCmd.Flags().StringVar(&enterpriseEndpoint, "endpoint", "", "When using the name of the enterprise, the endpoint must be specified when multiple enterprises with the same name exist.")
enterpriseCmd.AddCommand(
enterpriseListCmd,
enterpriseAddCmd,
enterpriseShowCmd,
enterpriseDeleteCmd,
+ enterpriseUpdateCmd,
)
rootCmd.AddCommand(enterpriseCmd)
}
func formatEnterprises(enterprises []params.Enterprise) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(enterprises)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range enterprises {
- t.AppendRow(table.Row{val.ID, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.Credentials.Name, val.GetBalancerType(), val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneEnterprise(enterprise params.Enterprise) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(enterprise)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", enterprise.ID})
+ t.AppendRow(table.Row{"Created At", enterprise.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", enterprise.UpdatedAt})
t.AppendRow(table.Row{"Name", enterprise.Name})
- t.AppendRow(table.Row{"Credentials", enterprise.CredentialsName})
+ t.AppendRow(table.Row{"Endpoint", enterprise.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", enterprise.GetBalancerType()})
+ t.AppendRow(table.Row{"Credentials", enterprise.Credentials.Name})
t.AppendRow(table.Row{"Pool manager running", enterprise.PoolManagerStatus.IsRunning})
if !enterprise.PoolManagerStatus.IsRunning {
t.AppendRow(table.Row{"Failure reason", enterprise.PoolManagerStatus.FailureReason})
@@ -182,9 +277,15 @@ func formatOneEnterprise(enterprise params.Enterprise) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+
+ if len(enterprise.Events) > 0 {
+ for _, event := range enterprise.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
diff --git a/cmd/garm-cli/cmd/events.go b/cmd/garm-cli/cmd/events.go
new file mode 100644
index 00000000..da44732a
--- /dev/null
+++ b/cmd/garm-cli/cmd/events.go
@@ -0,0 +1,65 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/gorilla/websocket"
+ "github.com/spf13/cobra"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+)
+
+var signals = []os.Signal{
+ os.Interrupt,
+ syscall.SIGTERM,
+}
+
+var eventsCmd = &cobra.Command{
+ Use: "debug-events",
+ SilenceUsage: true,
+ Short: "Stream garm events",
+ Long: `Stream all garm events to the terminal.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+
+ reader, err := garmWs.NewReader(ctx, mgr.BaseURL, "/api/v1/ws/events", mgr.Token, common.PrintWebsocketMessage)
+ if err != nil {
+ return err
+ }
+
+ if err := reader.Start(); err != nil {
+ return err
+ }
+
+ if eventsFilters != "" {
+ if err := reader.WriteMessage(websocket.TextMessage, []byte(eventsFilters)); err != nil {
+ return err
+ }
+ }
+ <-reader.Done()
+ return nil
+ },
+}
+
+func init() {
+ eventsCmd.Flags().StringVarP(&eventsFilters, "filters", "m", "", "Json with event filters you want to apply")
+ rootCmd.AddCommand(eventsCmd)
+}
diff --git a/cmd/garm-cli/cmd/gitea.go b/cmd/garm-cli/cmd/gitea.go
new file mode 100644
index 00000000..6627fd6f
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea.go
@@ -0,0 +1,34 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import "github.com/spf13/cobra"
+
+// giteaCmd represents the the gitea command. This command has a set
+// of subcommands that allow configuring and managing Gitea endpoints
+// and credentials.
+var giteaCmd = &cobra.Command{
+ Use: "gitea",
+ Aliases: []string{"gt"},
+ SilenceUsage: true,
+ Short: "Manage Gitea resources",
+ Long: `Manage Gitea related resources.
+
+This command allows you to configure and manage Gitea endpoints and credentials`,
+ Run: nil,
+}
+
+func init() {
+ rootCmd.AddCommand(giteaCmd)
+}
diff --git a/cmd/garm-cli/cmd/gitea_credentials.go b/cmd/garm-cli/cmd/gitea_credentials.go
new file mode 100644
index 00000000..d26f95ed
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea_credentials.go
@@ -0,0 +1,317 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientCreds "github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+// giteaCredentialsCmd represents the gitea credentials command
+var giteaCredentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "Manage gitea credentials",
+ Long: `Manage Gitea credentials stored in GARM.
+
+This command allows you to add, update, list and delete Gitea credentials.`,
+ Run: nil,
+}
+
+var giteaCredentialsListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List configured gitea credentials",
+ Long: `List the names of the gitea personal access tokens available to the garm.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listCredsReq := apiClientCreds.NewListGiteaCredentialsParams()
+ response, err := apiCli.Credentials.ListGiteaCredentials(listCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatGiteaCredentials(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ Short: "Show details of a configured gitea credential",
+ Long: `Show the details of a configured gitea credential.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+ showCredsReq := apiClientCreds.NewGetGiteaCredentialsParams().WithID(credID)
+ response, err := apiCli.Credentials.GetGiteaCredentials(showCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update a gitea credential",
+ Long: "Update a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ updateParams, err := parseGiteaCredentialsUpdateParams()
+ if err != nil {
+ return err
+ }
+
+ updateCredsReq := apiClientCreds.NewUpdateGiteaCredentialsParams().WithID(credID)
+ updateCredsReq.Body = updateParams
+
+ response, err := apiCli.Credentials.UpdateGiteaCredentials(updateCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+var giteaCredentialsDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ Short: "Delete a gitea credential",
+ Long: "Delete a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ deleteCredsReq := apiClientCreds.NewDeleteGiteaCredentialsParams().WithID(credID)
+ if err := apiCli.Credentials.DeleteGiteaCredentials(deleteCredsReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var giteaCredentialsAddCmd = &cobra.Command{
+ Use: "add",
+ Short: "Add a gitea credential",
+ Long: "Add a gitea credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) > 0 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ addParams, err := parseGiteaCredentialsAddParams()
+ if err != nil {
+ return err
+ }
+
+ addCredsReq := apiClientCreds.NewCreateGiteaCredentialsParams()
+ addCredsReq.Body = addParams
+
+ response, err := apiCli.Credentials.CreateGiteaCredentials(addCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGiteaCredential(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ giteaCredentialsUpdateCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+
+ giteaCredentialsListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsType, "auth-type", "", "The type of the credential")
+ giteaCredentialsAddCmd.Flags().StringVar(&credentialsEndpoint, "endpoint", "", "The endpoint to associate the credential with")
+
+ giteaCredentialsAddCmd.MarkFlagRequired("name")
+ giteaCredentialsAddCmd.MarkFlagRequired("auth-type")
+ giteaCredentialsAddCmd.MarkFlagRequired("description")
+ giteaCredentialsAddCmd.MarkFlagRequired("endpoint")
+
+ giteaCredentialsCmd.AddCommand(
+ giteaCredentialsListCmd,
+ giteaCredentialsShowCmd,
+ giteaCredentialsUpdateCmd,
+ giteaCredentialsDeleteCmd,
+ giteaCredentialsAddCmd,
+ )
+ giteaCmd.AddCommand(giteaCredentialsCmd)
+}
+
+func parseGiteaCredentialsAddParams() (ret params.CreateGiteaCredentialsParams, err error) {
+ ret.Name = credentialsName
+ ret.Description = credentialsDescription
+ ret.AuthType = params.ForgeAuthType(credentialsType)
+ ret.Endpoint = credentialsEndpoint
+ switch ret.AuthType {
+ case params.ForgeAuthTypePAT:
+ ret.PAT.OAuth2Token = credentialsOAuthToken
+ default:
+ return params.CreateGiteaCredentialsParams{}, fmt.Errorf("invalid auth type: %s (supported are: pat)", credentialsType)
+ }
+
+ return ret, nil
+}
+
+func parseGiteaCredentialsUpdateParams() (params.UpdateGiteaCredentialsParams, error) {
+ var updateParams params.UpdateGiteaCredentialsParams
+
+ if credentialsName != "" {
+ updateParams.Name = &credentialsName
+ }
+
+ if credentialsDescription != "" {
+ updateParams.Description = &credentialsDescription
+ }
+
+ if credentialsOAuthToken != "" {
+ if updateParams.PAT == nil {
+ updateParams.PAT = ¶ms.GithubPAT{}
+ }
+ updateParams.PAT.OAuth2Token = credentialsOAuthToken
+ }
+
+ return updateParams, nil
+}
+
+func formatGiteaCredentials(creds []params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(creds)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Name", "Description", "Base URL", "API URL", "Type"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range creds {
+ row := table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.AuthType}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func formatOneGiteaCredential(cred params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(cred)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+
+ t.AppendRow(table.Row{"ID", cred.ID})
+ t.AppendRow(table.Row{"Created At", cred.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", cred.UpdatedAt})
+ t.AppendRow(table.Row{"Name", cred.Name})
+ t.AppendRow(table.Row{"Description", cred.Description})
+ t.AppendRow(table.Row{"Base URL", cred.BaseURL})
+ t.AppendRow(table.Row{"API URL", cred.APIBaseURL})
+ t.AppendRow(table.Row{"Type", cred.AuthType})
+ t.AppendRow(table.Row{"Endpoint", cred.Endpoint.Name})
+
+ if len(cred.Repositories) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, repo := range cred.Repositories {
+ t.AppendRow(table.Row{"Repositories", repo.String()})
+ }
+ }
+
+ if len(cred.Organizations) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, org := range cred.Organizations {
+ t.AppendRow(table.Row{"Organizations", org.Name})
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/gitea_endpoints.go b/cmd/garm-cli/cmd/gitea_endpoints.go
new file mode 100644
index 00000000..55fa09c9
--- /dev/null
+++ b/cmd/garm-cli/cmd/gitea_endpoints.go
@@ -0,0 +1,231 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ apiClientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ "github.com/cloudbase/garm/params"
+)
+
+var giteaEndpointCmd = &cobra.Command{
+ Use: "endpoint",
+ SilenceUsage: true,
+ Short: "Manage Gitea endpoints",
+ Long: `Manage Gitea endpoints.
+
+This command allows you to configure and manage Gitea endpoints`,
+ Run: nil,
+}
+
+var giteaEndpointListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ SilenceUsage: true,
+ Short: "List Gitea endpoints",
+ Long: `List all configured Gitea endpoints.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newListReq := apiClientEndpoints.NewListGiteaEndpointsParams()
+ response, err := apiCli.Endpoints.ListGiteaEndpoints(newListReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatEndpoints(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ SilenceUsage: true,
+ Short: "Show Gitea endpoint",
+ Long: `Show details of a Gitea endpoint.`,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newShowReq := apiClientEndpoints.NewGetGiteaEndpointParams()
+ newShowReq.Name = args[0]
+ response, err := apiCli.Endpoints.GetGiteaEndpoint(newShowReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointCreateCmd = &cobra.Command{
+ Use: "create",
+ SilenceUsage: true,
+ Short: "Create Gitea endpoint",
+ Long: `Create a new Gitea endpoint.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ createParams, err := parseGiteaCreateParams()
+ if err != nil {
+ return err
+ }
+
+ newCreateReq := apiClientEndpoints.NewCreateGiteaEndpointParams()
+ newCreateReq.Body = createParams
+
+ response, err := apiCli.Endpoints.CreateGiteaEndpoint(newCreateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var giteaEndpointDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ SilenceUsage: true,
+ Short: "Delete Gitea endpoint",
+ Long: "Delete a Gitea endpoint",
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newDeleteReq := apiClientEndpoints.NewDeleteGiteaEndpointParams()
+ newDeleteReq.Name = args[0]
+ if err := apiCli.Endpoints.DeleteGiteaEndpoint(newDeleteReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var giteaEndpointUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update Gitea endpoint",
+ Long: "Update a Gitea endpoint",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateParams := params.UpdateGiteaEndpointParams{}
+
+ if cmd.Flags().Changed("ca-cert-path") {
+ cert, err := parseAndReadCABundle()
+ if err != nil {
+ return err
+ }
+ updateParams.CACertBundle = cert
+ }
+
+ if cmd.Flags().Changed("description") {
+ updateParams.Description = &endpointDescription
+ }
+
+ if cmd.Flags().Changed("base-url") {
+ updateParams.BaseURL = &endpointBaseURL
+ }
+
+ if cmd.Flags().Changed("api-base-url") {
+ updateParams.APIBaseURL = &endpointAPIBaseURL
+ }
+
+ newEndpointUpdateReq := apiClientEndpoints.NewUpdateGiteaEndpointParams()
+ newEndpointUpdateReq.Name = args[0]
+ newEndpointUpdateReq.Body = updateParams
+
+ response, err := apiCli.Endpoints.UpdateGiteaEndpoint(newEndpointUpdateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointName, "name", "", "Name of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the github endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the Gitea endpoint")
+ giteaEndpointCreateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the Gitea endpoint")
+
+ giteaEndpointListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ giteaEndpointCreateCmd.MarkFlagRequired("name")
+ giteaEndpointCreateCmd.MarkFlagRequired("base-url")
+ giteaEndpointCreateCmd.MarkFlagRequired("api-base-url")
+
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the Gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the Gitea endpoint")
+ giteaEndpointUpdateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the Gitea endpoint")
+
+ giteaEndpointCmd.AddCommand(
+ giteaEndpointListCmd,
+ giteaEndpointShowCmd,
+ giteaEndpointCreateCmd,
+ giteaEndpointDeleteCmd,
+ giteaEndpointUpdateCmd,
+ )
+
+ giteaCmd.AddCommand(giteaEndpointCmd)
+}
+
+func parseGiteaCreateParams() (params.CreateGiteaEndpointParams, error) {
+ certBundleBytes, err := parseAndReadCABundle()
+ if err != nil {
+ return params.CreateGiteaEndpointParams{}, err
+ }
+
+ ret := params.CreateGiteaEndpointParams{
+ Name: endpointName,
+ BaseURL: endpointBaseURL,
+ APIBaseURL: endpointAPIBaseURL,
+ Description: endpointDescription,
+ CACertBundle: certBundleBytes,
+ }
+ return ret, nil
+}
diff --git a/cmd/garm-cli/cmd/github.go b/cmd/garm-cli/cmd/github.go
new file mode 100644
index 00000000..71342026
--- /dev/null
+++ b/cmd/garm-cli/cmd/github.go
@@ -0,0 +1,43 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import "github.com/spf13/cobra"
+
+var (
+ endpointName string
+ endpointBaseURL string
+ endpointUploadURL string
+ endpointAPIBaseURL string
+ endpointCACertPath string
+ endpointDescription string
+)
+
+// githubCmd represents the the github command. This command has a set
+// of subcommands that allow configuring and managing GitHub endpoints
+// and credentials.
+var githubCmd = &cobra.Command{
+ Use: "github",
+ Aliases: []string{"gh"},
+ SilenceUsage: true,
+ Short: "Manage GitHub resources",
+ Long: `Manage GitHub related resources.
+
+This command allows you to configure and manage GitHub endpoints and credentials`,
+ Run: nil,
+}
+
+func init() {
+ rootCmd.AddCommand(githubCmd)
+}
diff --git a/cmd/garm-cli/cmd/github_credentials.go b/cmd/garm-cli/cmd/github_credentials.go
new file mode 100644
index 00000000..6f9b6409
--- /dev/null
+++ b/cmd/garm-cli/cmd/github_credentials.go
@@ -0,0 +1,425 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "os"
+ "strconv"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientCreds "github.com/cloudbase/garm/client/credentials"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ credentialsName string
+ credentialsDescription string
+ credentialsOAuthToken string
+ credentialsAppInstallationID int64
+ credentialsAppID int64
+ credentialsPrivateKeyPath string
+ credentialsType string
+ credentialsEndpoint string
+)
+
+// credentialsCmd represents the credentials command
+var credentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "List configured credentials. This is an alias for the github credentials command.",
+ Long: `List all available github credentials.
+
+This command is an alias for the garm-cli github credentials command.`,
+ Run: nil,
+}
+
+// githubCredentialsCmd represents the github credentials command
+var githubCredentialsCmd = &cobra.Command{
+ Use: "credentials",
+ Aliases: []string{"creds"},
+ Short: "Manage github credentials",
+ Long: `Manage GitHub credentials stored in GARM.
+
+This command allows you to add, update, list and delete GitHub credentials.`,
+ Run: nil,
+}
+
+var githubCredentialsListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List configured github credentials",
+	Long:         `List the names of the github personal access tokens available to garm.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listCredsReq := apiClientCreds.NewListCredentialsParams()
+ response, err := apiCli.Credentials.ListCredentials(listCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatGithubCredentials(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ Short: "Show details of a configured github credential",
+ Long: `Show the details of a configured github credential.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+ showCredsReq := apiClientCreds.NewGetCredentialsParams().WithID(credID)
+ response, err := apiCli.Credentials.GetCredentials(showCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update a github credential",
+ Long: "Update a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ updateParams, err := parseCredentialsUpdateParams()
+ if err != nil {
+ return err
+ }
+
+ updateCredsReq := apiClientCreds.NewUpdateCredentialsParams().WithID(credID)
+ updateCredsReq.Body = updateParams
+
+ response, err := apiCli.Credentials.UpdateCredentials(updateCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+var githubCredentialsDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ Short: "Delete a github credential",
+ Long: "Delete a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) < 1 {
+ return fmt.Errorf("missing required argument: credential ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ credID, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid credential ID: %s", args[0])
+ }
+
+ deleteCredsReq := apiClientCreds.NewDeleteCredentialsParams().WithID(credID)
+ if err := apiCli.Credentials.DeleteCredentials(deleteCredsReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var githubCredentialsAddCmd = &cobra.Command{
+ Use: "add",
+ Short: "Add a github credential",
+ Long: "Add a github credential",
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) > 0 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ addParams, err := parseCredentialsAddParams()
+ if err != nil {
+ return err
+ }
+
+ addCredsReq := apiClientCreds.NewCreateCredentialsParams()
+ addCredsReq.Body = addParams
+
+ response, err := apiCli.Credentials.CreateCredentials(addCredsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneGithubCredential(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ githubCredentialsUpdateCmd.Flags().Int64Var(&credentialsAppInstallationID, "app-installation-id", 0, "If the credential is an app, the installation ID")
+ githubCredentialsUpdateCmd.Flags().Int64Var(&credentialsAppID, "app-id", 0, "If the credential is an app, the app ID")
+ githubCredentialsUpdateCmd.Flags().StringVar(&credentialsPrivateKeyPath, "private-key-path", "", "If the credential is an app, the path to the private key file")
+
+ githubCredentialsListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-installation-id")
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-id")
+ githubCredentialsUpdateCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "private-key-path")
+ githubCredentialsUpdateCmd.MarkFlagsRequiredTogether("app-installation-id", "app-id", "private-key-path")
+
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsName, "name", "", "Name of the credential")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsDescription, "description", "", "Description of the credential")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsOAuthToken, "pat-oauth-token", "", "If the credential is a personal access token, the OAuth token")
+ githubCredentialsAddCmd.Flags().Int64Var(&credentialsAppInstallationID, "app-installation-id", 0, "If the credential is an app, the installation ID")
+ githubCredentialsAddCmd.Flags().Int64Var(&credentialsAppID, "app-id", 0, "If the credential is an app, the app ID")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsPrivateKeyPath, "private-key-path", "", "If the credential is an app, the path to the private key file")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsType, "auth-type", "", "The type of the credential")
+ githubCredentialsAddCmd.Flags().StringVar(&credentialsEndpoint, "endpoint", "", "The endpoint to associate the credential with")
+
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-installation-id")
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "app-id")
+ githubCredentialsAddCmd.MarkFlagsMutuallyExclusive("pat-oauth-token", "private-key-path")
+ githubCredentialsAddCmd.MarkFlagsRequiredTogether("app-installation-id", "app-id", "private-key-path")
+
+ githubCredentialsAddCmd.MarkFlagRequired("name")
+ githubCredentialsAddCmd.MarkFlagRequired("auth-type")
+ githubCredentialsAddCmd.MarkFlagRequired("description")
+ githubCredentialsAddCmd.MarkFlagRequired("endpoint")
+
+ githubCredentialsCmd.AddCommand(
+ githubCredentialsListCmd,
+ githubCredentialsShowCmd,
+ githubCredentialsUpdateCmd,
+ githubCredentialsDeleteCmd,
+ githubCredentialsAddCmd,
+ )
+ githubCmd.AddCommand(githubCredentialsCmd)
+
+ credentialsCmd.AddCommand(githubCredentialsListCmd)
+ rootCmd.AddCommand(credentialsCmd)
+}
+
+func parsePrivateKeyFromPath(path string) ([]byte, error) {
+ if _, err := os.Stat(path); err != nil {
+		return nil, fmt.Errorf("private key file not found: %s", path)
+ }
+ keyContents, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read private key file: %w", err)
+ }
+ pemBlock, _ := pem.Decode(keyContents)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("failed to decode PEM block")
+ }
+ if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err != nil {
+ return nil, fmt.Errorf("failed to parse private key: %w", err)
+ }
+ return keyContents, nil
+}
+
+func parseCredentialsAddParams() (ret params.CreateGithubCredentialsParams, err error) {
+ ret.Name = credentialsName
+ ret.Description = credentialsDescription
+ ret.AuthType = params.ForgeAuthType(credentialsType)
+ ret.Endpoint = credentialsEndpoint
+ switch ret.AuthType {
+ case params.ForgeAuthTypePAT:
+ ret.PAT.OAuth2Token = credentialsOAuthToken
+ case params.ForgeAuthTypeApp:
+ ret.App.InstallationID = credentialsAppInstallationID
+ ret.App.AppID = credentialsAppID
+ keyContents, err := parsePrivateKeyFromPath(credentialsPrivateKeyPath)
+ if err != nil {
+ return params.CreateGithubCredentialsParams{}, err
+ }
+ ret.App.PrivateKeyBytes = keyContents
+ default:
+ return params.CreateGithubCredentialsParams{}, fmt.Errorf("invalid auth type: %s (supported are: app, pat)", credentialsType)
+ }
+
+ return ret, nil
+}
+
+func parseCredentialsUpdateParams() (params.UpdateGithubCredentialsParams, error) {
+ var updateParams params.UpdateGithubCredentialsParams
+
+ if credentialsAppInstallationID != 0 || credentialsAppID != 0 || credentialsPrivateKeyPath != "" {
+ updateParams.App = ¶ms.GithubApp{}
+ }
+
+ if credentialsName != "" {
+ updateParams.Name = &credentialsName
+ }
+
+ if credentialsDescription != "" {
+ updateParams.Description = &credentialsDescription
+ }
+
+ if credentialsOAuthToken != "" {
+ if updateParams.PAT == nil {
+ updateParams.PAT = ¶ms.GithubPAT{}
+ }
+ updateParams.PAT.OAuth2Token = credentialsOAuthToken
+ }
+
+ if credentialsAppInstallationID != 0 {
+ updateParams.App.InstallationID = credentialsAppInstallationID
+ }
+
+ if credentialsAppID != 0 {
+ updateParams.App.AppID = credentialsAppID
+ }
+
+ if credentialsPrivateKeyPath != "" {
+ keyContents, err := parsePrivateKeyFromPath(credentialsPrivateKeyPath)
+ if err != nil {
+ return params.UpdateGithubCredentialsParams{}, err
+ }
+ updateParams.App.PrivateKeyBytes = keyContents
+ }
+
+ return updateParams, nil
+}
+
+func formatGithubCredentials(creds []params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(creds)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Name", "Description", "Base URL", "API URL", "Upload URL", "Type"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range creds {
+ row := table.Row{val.ID, val.Name, val.Description, val.BaseURL, val.APIBaseURL, val.UploadBaseURL, val.AuthType}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func formatOneGithubCredential(cred params.ForgeCredentials) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(cred)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+
+ var resetMinutes float64
+ if cred.RateLimit != nil {
+ resetMinutes = cred.RateLimit.ResetIn().Minutes()
+ }
+
+ t.AppendRow(table.Row{"ID", cred.ID})
+ t.AppendRow(table.Row{"Created At", cred.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", cred.UpdatedAt})
+ t.AppendRow(table.Row{"Name", cred.Name})
+ t.AppendRow(table.Row{"Description", cred.Description})
+ t.AppendRow(table.Row{"Base URL", cred.BaseURL})
+ t.AppendRow(table.Row{"API URL", cred.APIBaseURL})
+ t.AppendRow(table.Row{"Upload URL", cred.UploadBaseURL})
+ t.AppendRow(table.Row{"Type", cred.AuthType})
+ t.AppendRow(table.Row{"Endpoint", cred.Endpoint.Name})
+ if resetMinutes > 0 {
+ t.AppendRow(table.Row{"", ""})
+ t.AppendRow(table.Row{"Remaining API requests", cred.RateLimit.Remaining})
+ t.AppendRow(table.Row{"Rate limit reset", fmt.Sprintf("%d minutes", int64(resetMinutes))})
+ }
+
+ if len(cred.Repositories) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, repo := range cred.Repositories {
+ t.AppendRow(table.Row{"Repositories", repo.String()})
+ }
+ }
+
+ if len(cred.Organizations) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, org := range cred.Organizations {
+ t.AppendRow(table.Row{"Organizations", org.Name})
+ }
+ }
+
+ if len(cred.Enterprises) > 0 {
+ t.AppendRow(table.Row{"", ""})
+ for _, ent := range cred.Enterprises {
+ t.AppendRow(table.Row{"Enterprises", ent.Name})
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/github_endpoints.go b/cmd/garm-cli/cmd/github_endpoints.go
new file mode 100644
index 00000000..61f46810
--- /dev/null
+++ b/cmd/garm-cli/cmd/github_endpoints.go
@@ -0,0 +1,315 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package cmd
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "os"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var githubEndpointCmd = &cobra.Command{
+ Use: "endpoint",
+ SilenceUsage: true,
+ Short: "Manage GitHub endpoints",
+ Long: `Manage GitHub endpoints.
+
+This command allows you to configure and manage GitHub endpoints`,
+ Run: nil,
+}
+
+var githubEndpointListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ SilenceUsage: true,
+ Short: "List GitHub endpoints",
+ Long: `List all configured GitHub endpoints.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newGHListReq := apiClientEndpoints.NewListGithubEndpointsParams()
+ response, err := apiCli.Endpoints.ListGithubEndpoints(newGHListReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatEndpoints(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointShowCmd = &cobra.Command{
+ Use: "show",
+ Aliases: []string{"get"},
+ SilenceUsage: true,
+ Short: "Show GitHub endpoint",
+ Long: `Show details of a GitHub endpoint.`,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newGHShowReq := apiClientEndpoints.NewGetGithubEndpointParams()
+ newGHShowReq.Name = args[0]
+ response, err := apiCli.Endpoints.GetGithubEndpoint(newGHShowReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointCreateCmd = &cobra.Command{
+ Use: "create",
+ SilenceUsage: true,
+ Short: "Create GitHub endpoint",
+ Long: `Create a new GitHub endpoint.`,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ createParams, err := parseCreateParams()
+ if err != nil {
+ return err
+ }
+
+ newGHCreateReq := apiClientEndpoints.NewCreateGithubEndpointParams()
+ newGHCreateReq.Body = createParams
+
+ response, err := apiCli.Endpoints.CreateGithubEndpoint(newGHCreateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+var githubEndpointDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm"},
+ SilenceUsage: true,
+ Short: "Delete GitHub endpoint",
+ Long: "Delete a GitHub endpoint",
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ newGHDeleteReq := apiClientEndpoints.NewDeleteGithubEndpointParams()
+ newGHDeleteReq.Name = args[0]
+ if err := apiCli.Endpoints.DeleteGithubEndpoint(newGHDeleteReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var githubEndpointUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update GitHub endpoint",
+ Long: "Update a GitHub endpoint",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an endpoint name")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateParams := params.UpdateGithubEndpointParams{}
+
+ if cmd.Flags().Changed("ca-cert-path") {
+ cert, err := parseAndReadCABundle()
+ if err != nil {
+ return err
+ }
+ updateParams.CACertBundle = cert
+ }
+
+ if cmd.Flags().Changed("description") {
+ updateParams.Description = &endpointDescription
+ }
+
+ if cmd.Flags().Changed("base-url") {
+ updateParams.BaseURL = &endpointBaseURL
+ }
+
+ if cmd.Flags().Changed("upload-url") {
+ updateParams.UploadBaseURL = &endpointUploadURL
+ }
+
+ if cmd.Flags().Changed("api-base-url") {
+ updateParams.APIBaseURL = &endpointAPIBaseURL
+ }
+
+ newGHEndpointUpdateReq := apiClientEndpoints.NewUpdateGithubEndpointParams()
+ newGHEndpointUpdateReq.Name = args[0]
+ newGHEndpointUpdateReq.Body = updateParams
+
+ response, err := apiCli.Endpoints.UpdateGithubEndpoint(newGHEndpointUpdateReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneEndpoint(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ githubEndpointCreateCmd.Flags().StringVar(&endpointName, "name", "", "Name of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the github endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointUploadURL, "upload-url", "", "Upload URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the GitHub endpoint")
+ githubEndpointCreateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the GitHub endpoint")
+
+ githubEndpointListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+
+ githubEndpointCreateCmd.MarkFlagRequired("name")
+ githubEndpointCreateCmd.MarkFlagRequired("base-url")
+ githubEndpointCreateCmd.MarkFlagRequired("api-base-url")
+ githubEndpointCreateCmd.MarkFlagRequired("upload-url")
+
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointDescription, "description", "", "Description for the github endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointBaseURL, "base-url", "", "Base URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointUploadURL, "upload-url", "", "Upload URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointAPIBaseURL, "api-base-url", "", "API Base URL of the GitHub endpoint")
+ githubEndpointUpdateCmd.Flags().StringVar(&endpointCACertPath, "ca-cert-path", "", "CA Cert Path of the GitHub endpoint")
+
+ githubEndpointCmd.AddCommand(
+ githubEndpointListCmd,
+ githubEndpointShowCmd,
+ githubEndpointCreateCmd,
+ githubEndpointDeleteCmd,
+ githubEndpointUpdateCmd,
+ )
+
+ githubCmd.AddCommand(githubEndpointCmd)
+}
+
+func parseAndReadCABundle() ([]byte, error) {
+ if endpointCACertPath == "" {
+ return nil, nil
+ }
+
+ if _, err := os.Stat(endpointCACertPath); os.IsNotExist(err) {
+ return nil, fmt.Errorf("CA cert file not found: %s", endpointCACertPath)
+ }
+ contents, err := os.ReadFile(endpointCACertPath)
+ if err != nil {
+ return nil, err
+ }
+ pemBlock, _ := pem.Decode(contents)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("failed to decode PEM block")
+ }
+ if _, err := x509.ParseCertificates(pemBlock.Bytes); err != nil {
+ return nil, fmt.Errorf("failed to parse CA cert bundle: %w", err)
+ }
+ return contents, nil
+}
+
+func parseCreateParams() (params.CreateGithubEndpointParams, error) {
+ certBundleBytes, err := parseAndReadCABundle()
+ if err != nil {
+ return params.CreateGithubEndpointParams{}, err
+ }
+
+ ret := params.CreateGithubEndpointParams{
+ Name: endpointName,
+ BaseURL: endpointBaseURL,
+ UploadBaseURL: endpointUploadURL,
+ APIBaseURL: endpointAPIBaseURL,
+ Description: endpointDescription,
+ CACertBundle: certBundleBytes,
+ }
+ return ret, nil
+}
+
+func formatEndpoints(endpoints params.ForgeEndpoints) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(endpoints)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Name", "Base URL", "Description"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
+ t.AppendHeader(header)
+ for _, val := range endpoints {
+ row := table.Row{val.Name, val.BaseURL, val.Description}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func formatOneEndpoint(endpoint params.ForgeEndpoint) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(endpoint)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+ t.AppendRow([]interface{}{"Name", endpoint.Name})
+ t.AppendRow([]interface{}{"Description", endpoint.Description})
+ t.AppendRow([]interface{}{"Created At", endpoint.CreatedAt})
+ t.AppendRow([]interface{}{"Updated At", endpoint.UpdatedAt})
+ t.AppendRow([]interface{}{"Base URL", endpoint.BaseURL})
+ if endpoint.UploadBaseURL != "" {
+ t.AppendRow([]interface{}{"Upload URL", endpoint.UploadBaseURL})
+ }
+ t.AppendRow([]interface{}{"API Base URL", endpoint.APIBaseURL})
+ if len(endpoint.CACertBundle) > 0 {
+ t.AppendRow([]interface{}{"CA Cert Bundle", string(endpoint.CACertBundle)})
+ }
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/init.go b/cmd/garm-cli/cmd/init.go
index 3c879c3f..c544699e 100644
--- a/cmd/garm-cli/cmd/init.go
+++ b/cmd/garm-cli/cmd/init.go
@@ -16,15 +16,26 @@ package cmd
import (
"fmt"
+ "net/url"
"strings"
+ openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientController "github.com/cloudbase/garm/client/controller"
+ apiClientFirstRun "github.com/cloudbase/garm/client/first_run"
+ apiClientLogin "github.com/cloudbase/garm/client/login"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/cloudbase/garm/params"
+)
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
+var (
+ callbackURL string
+ metadataURL string
+ webhookURL string
+ minimumJobAgeBackoff uint
)
// initCmd represents the init command
@@ -43,57 +54,95 @@ Example usage:
garm-cli init --name=dev --url=https://runner.example.com --username=admin --password=superSecretPassword
`,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if cfg != nil {
if cfg.HasManager(loginProfileName) {
return fmt.Errorf("a manager with name %s already exists in your local config", loginProfileName)
}
}
+ url := strings.TrimSuffix(loginURL, "/")
if err := promptUnsetInitVariables(); err != nil {
return err
}
- newUser := params.NewUserParams{
+ ensureDefaultEndpoints(url)
+
+ newUserReq := apiClientFirstRun.NewFirstRunParams()
+ newUserReq.Body = params.NewUserParams{
Username: loginUserName,
Password: loginPassword,
FullName: loginFullName,
Email: loginEmail,
}
+ initAPIClient(url, "")
- url := strings.TrimSuffix(loginURL, "/")
- response, err := cli.InitManager(url, newUser)
+ response, err := apiCli.FirstRun.FirstRun(newUserReq, authToken)
if err != nil {
- return errors.Wrap(err, "initializing manager")
+ return fmt.Errorf("error initializing manager: %w", err)
}
- loginParams := params.PasswordLoginParams{
+ newLoginParamsReq := apiClientLogin.NewLoginParams()
+ newLoginParamsReq.Body = params.PasswordLoginParams{
Username: loginUserName,
Password: loginPassword,
}
- token, err := cli.Login(url, loginParams)
+ token, err := apiCli.Login.Login(newLoginParamsReq, authToken)
if err != nil {
- return errors.Wrap(err, "authenticating")
+ return fmt.Errorf("error authenticating: %w", err)
}
cfg.Managers = append(cfg.Managers, config.Manager{
Name: loginProfileName,
BaseURL: url,
- Token: token,
+ Token: token.Payload.Token,
})
+ authToken = openapiRuntimeClient.BearerToken(token.Payload.Token)
cfg.ActiveManager = loginProfileName
if err := cfg.SaveConfig(); err != nil {
- return errors.Wrap(err, "saving config")
+ return fmt.Errorf("error saving config: %w", err)
}
- renderUserTable(response)
+ updateUrlsReq := apiClientController.NewUpdateControllerParams()
+ updateUrlsReq.Body = params.UpdateControllerParams{
+ MetadataURL: &metadataURL,
+ CallbackURL: &callbackURL,
+ WebhookURL: &webhookURL,
+ }
+
+ controllerInfoResponse, err := apiCli.Controller.UpdateController(updateUrlsReq, authToken)
+ renderResponseMessage(response.Payload, controllerInfoResponse, err)
return nil
},
}
+func ensureDefaultEndpoints(loginURL string) (err error) {
+ if metadataURL == "" {
+ metadataURL, err = url.JoinPath(loginURL, "api/v1/metadata")
+ if err != nil {
+ return err
+ }
+ }
+
+ if callbackURL == "" {
+ callbackURL, err = url.JoinPath(loginURL, "api/v1/callbacks")
+ if err != nil {
+ return err
+ }
+ }
+
+ if webhookURL == "" {
+ webhookURL, err = url.JoinPath(loginURL, "webhooks")
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func promptUnsetInitVariables() error {
var err error
if loginUserName == "" {
@@ -111,11 +160,18 @@ func promptUnsetInitVariables() error {
}
if loginPassword == "" {
- loginPassword, err = common.PromptPassword("Password")
+ passwd, err := common.PromptPassword("Password", "")
if err != nil {
return err
}
+
+ _, err = common.PromptPassword("Confirm password", passwd)
+ if err != nil {
+ return err
+ }
+ loginPassword = passwd
}
+
return nil
}
@@ -126,13 +182,16 @@ func init() {
initCmd.Flags().StringVarP(&loginURL, "url", "a", "", "The base URL for the runner manager API")
initCmd.Flags().StringVarP(&loginUserName, "username", "u", "", "The desired administrative username")
initCmd.Flags().StringVarP(&loginEmail, "email", "e", "", "Email address")
+ initCmd.Flags().StringVarP(&metadataURL, "metadata-url", "m", "", "The metadata URL for the controller (ie. https://garm.example.com/api/v1/metadata)")
+ initCmd.Flags().StringVarP(&callbackURL, "callback-url", "c", "", "The callback URL for the controller (ie. https://garm.example.com/api/v1/callbacks)")
+ initCmd.Flags().StringVarP(&webhookURL, "webhook-url", "w", "", "The webhook URL for the controller (ie. https://garm.example.com/webhooks)")
initCmd.Flags().StringVarP(&loginFullName, "full-name", "f", "", "Full name of the user")
initCmd.Flags().StringVarP(&loginPassword, "password", "p", "", "The admin password")
initCmd.MarkFlagRequired("name") //nolint
initCmd.MarkFlagRequired("url") //nolint
}
-func renderUserTable(user params.User) {
+func renderUserTable(user params.User) string {
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
@@ -141,5 +200,53 @@ func renderUserTable(user params.User) {
t.AppendRow(table.Row{"Username", user.Username})
t.AppendRow(table.Row{"Email", user.Email})
t.AppendRow(table.Row{"Enabled", user.Enabled})
- fmt.Println(t.Render())
+ return t.Render()
+}
+
+func renderResponseMessage(user params.User, controllerInfo *apiClientController.UpdateControllerOK, controllerURLUpdateErr error) {
+ headerMsg := `Congrats! Your controller is now initialized.
+
+Following are the details of the admin user and details about the controller.
+
+Admin user information:
+
+%s
+`
+
+ controllerMsg := `Controller information:
+
+%s
+
+Make sure that the URLs in the table above are reachable by the relevant parties.
+
+The metadata and callback URLs *must* be accessible by the runners that GARM spins up.
+The base webhook and the controller webhook URLs must be accessible by GitHub or GHES.
+`
+
+ controllerErrorMsg := `WARNING: Failed to set the required controller URLs with error: %q
+
+Please run:
+
+ garm-cli controller show
+
+To make sure that the callback, metadata and webhook URLs are set correctly. If not,
+you must set them up by running:
+
+ garm-cli controller update \
+ --metadata-url= \
+ --callback-url= \
+ --webhook-url=
+
+See the help message for garm-cli controller update for more information.
+`
+ var ctrlMsg string
+ if controllerURLUpdateErr != nil || controllerInfo == nil {
+ ctrlMsg = fmt.Sprintf(controllerErrorMsg, controllerURLUpdateErr)
+ } else {
+ controllerInfoTable := renderControllerInfoTable(controllerInfo.Payload)
+ ctrlMsg = fmt.Sprintf(controllerMsg, controllerInfoTable)
+ }
+
+ userTable := renderUserTable(user)
+ fmt.Printf("%s\n%s\n", fmt.Sprintf(headerMsg, userTable), ctrlMsg)
}
diff --git a/cmd/garm-cli/cmd/jobs.go b/cmd/garm-cli/cmd/jobs.go
new file mode 100644
index 00000000..1ce372cb
--- /dev/null
+++ b/cmd/garm-cli/cmd/jobs.go
@@ -0,0 +1,87 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientJobs "github.com/cloudbase/garm/client/jobs"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+// jobsCmd represents the jobs command
+var jobsCmd = &cobra.Command{
+ Use: "job",
+ SilenceUsage: true,
+ Short: "Information about jobs",
+ Long: `Query information about jobs.`,
+ Run: nil,
+}
+
+var jobsListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List jobs",
+ Long: `List all jobs currently recorded in the system.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listJobsReq := apiClientJobs.NewListJobsParams()
+ response, err := apiCli.Jobs.ListJobs(listJobsReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatJobs(response.Payload)
+ return nil
+ },
+}
+
+func formatJobs(jobs []params.Job) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(jobs)
+ return
+ }
+ t := table.NewWriter()
+	header := table.Row{"ID", "Name", "Status", "Conclusion", "Runner Name", "Repository", "Requested Labels", "Locked By"}
+ t.AppendHeader(header)
+
+ for _, job := range jobs {
+ lockedBy := ""
+ repo := fmt.Sprintf("%s/%s", job.RepositoryOwner, job.RepositoryName)
+ if job.LockedBy != uuid.Nil {
+ lockedBy = job.LockedBy.String()
+ }
+ t.AppendRow(table.Row{job.ID, job.Name, job.Status, job.Conclusion, job.RunnerName, repo, strings.Join(job.Labels, " "), lockedBy})
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func init() {
+ jobsCmd.AddCommand(
+ jobsListCmd,
+ )
+
+ rootCmd.AddCommand(jobsCmd)
+}
diff --git a/cmd/garm-cli/cmd/log.go b/cmd/garm-cli/cmd/log.go
index 4b7e031c..a7d2dfba 100644
--- a/cmd/garm-cli/cmd/log.go
+++ b/cmd/garm-cli/cmd/log.go
@@ -1,20 +1,34 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package cmd
import (
- "encoding/json"
- "fmt"
- "log"
- "net/http"
- "net/url"
- "os"
+ "context"
"os/signal"
- "time"
+ "strings"
- apiParams "github.com/cloudbase/garm/apiserver/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/gorilla/websocket"
"github.com/spf13/cobra"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+)
+
+var (
+ eventsFilters string
+ logLevel string
+ filters []string
+ enableColor bool
)
var logCmd = &cobra.Command{
@@ -22,79 +36,40 @@ var logCmd = &cobra.Command{
SilenceUsage: true,
Short: "Stream garm log",
Long: `Stream all garm logging to the terminal.`,
- RunE: func(cmd *cobra.Command, args []string) error {
- interrupt := make(chan os.Signal, 1)
- signal.Notify(interrupt, os.Interrupt)
+ RunE: func(_ *cobra.Command, _ []string) error {
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
- parsedURL, err := url.Parse(mgr.BaseURL)
+ // Parse filters into map
+ attributeFilters := make(map[string]string)
+ for _, filter := range filters {
+ parts := strings.SplitN(filter, "=", 2)
+ if len(parts) == 2 {
+ attributeFilters[parts[0]] = parts[1]
+ }
+ }
+
+ // Create log formatter with filters
+ logFormatter := common.NewLogFormatter(logLevel, attributeFilters, enableColor)
+
+ reader, err := garmWs.NewReader(ctx, mgr.BaseURL, "/api/v1/ws/logs", mgr.Token, logFormatter.FormatWebsocketMessage)
if err != nil {
return err
}
- wsScheme := "ws"
- if parsedURL.Scheme == "https" {
- wsScheme = "wss"
+ if err := reader.Start(); err != nil {
+ return err
}
- u := url.URL{Scheme: wsScheme, Host: parsedURL.Host, Path: "/api/v1/ws"}
- log.Printf("connecting to %s", u.String())
- header := http.Header{}
- header.Add("Authorization", fmt.Sprintf("Bearer %s", mgr.Token))
-
- c, response, err := websocket.DefaultDialer.Dial(u.String(), header)
- if err != nil {
- var resp apiParams.APIErrorResponse
- var msg string
- if err := json.NewDecoder(response.Body).Decode(&resp); err == nil {
- msg = resp.Details
- }
- log.Fatalf("failed to stream logs: %s (%s)", msg, response.Status)
- }
- defer c.Close()
-
- done := make(chan struct{})
-
- go func() {
- defer close(done)
- for {
- _, message, err := c.ReadMessage()
- if err != nil {
- log.Printf("read: %q", err)
- return
- }
- log.Print(util.SanitizeLogEntry(string(message)))
- }
- }()
-
- ticker := time.NewTicker(time.Second)
- defer ticker.Stop()
-
- for {
- select {
- case <-done:
- return nil
- case t := <-ticker.C:
- err := c.WriteMessage(websocket.TextMessage, []byte(t.String()))
- if err != nil {
- return err
- }
- case <-interrupt:
- // Cleanly close the connection by sending a close message and then
- // waiting (with timeout) for the server to close the connection.
- err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
- if err != nil {
- return err
- }
- select {
- case <-done:
- case <-time.After(time.Second):
- }
- return nil
- }
- }
+ <-reader.Done()
+ return nil
},
}
func init() {
+ logCmd.Flags().StringVar(&logLevel, "log-level", "", "Minimum log level to display (DEBUG, INFO, WARN, ERROR)")
+ logCmd.Flags().StringArrayVar(&filters, "filter", []string{}, "Filter logs by attribute (format: key=value) or message content (msg=text). You can specify this option multiple times. The filter will return true for any of the attributes you set.")
+ logCmd.Flags().BoolVar(&enableColor, "enable-color", true, "Enable color logging (auto-detects terminal support)")
+
rootCmd.AddCommand(logCmd)
}
diff --git a/cmd/garm-cli/cmd/metrics.go b/cmd/garm-cli/cmd/metrics.go
index 4069e3e6..ea1fd7ca 100644
--- a/cmd/garm-cli/cmd/metrics.go
+++ b/cmd/garm-cli/cmd/metrics.go
@@ -18,6 +18,8 @@ import (
"fmt"
"github.com/spf13/cobra"
+
+ apiClientMetricToken "github.com/cloudbase/garm/client/metrics_token"
)
// orgPoolCmd represents the pool command
@@ -34,16 +36,17 @@ var metricsTokenCreateCmd = &cobra.Command{
Short: "Create a metrics token",
Long: `Create a metrics token.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- token, err := cli.CreateMetricsToken()
+ showMetricsTokenReq := apiClientMetricToken.NewGetMetricsTokenParams()
+ response, err := apiCli.MetricsToken.GetMetricsToken(showMetricsTokenReq, authToken)
if err != nil {
return err
}
- fmt.Println(token)
+ fmt.Println(response.Payload.Token)
return nil
},
diff --git a/cmd/garm-cli/cmd/organization.go b/cmd/garm-cli/cmd/organization.go
index 34fe2746..b16812fa 100644
--- a/cmd/garm-cli/cmd/organization.go
+++ b/cmd/garm-cli/cmd/organization.go
@@ -16,17 +16,26 @@ package cmd
import (
"fmt"
-
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ "github.com/cloudbase/garm-provider-common/util"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- orgName string
- orgWebhookSecret string
- orgCreds string
+ orgName string
+ orgEndpoint string
+ orgWebhookSecret string
+ orgCreds string
+ orgRandomWebhookSecret bool
+ insecureOrgWebhook bool
+ keepOrgWebhook bool
+ installOrgWebhook bool
)
// organizationCmd represents the organization command
@@ -43,27 +52,201 @@ organization for which garm maintains pools of self hosted runners.`,
Run: nil,
}
+var orgWebhookCmd = &cobra.Command{
+ Use: "webhook",
+ Short: "Manage organization webhooks",
+ Long: `Manage organization webhooks.`,
+ SilenceUsage: true,
+ Run: nil,
+}
+
+var orgWebhookInstallCmd = &cobra.Command{
+ Use: "install",
+ Short: "Install webhook",
+ Long: `Install webhook for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ installWebhookReq := apiClientOrgs.NewInstallOrgWebhookParams()
+ installWebhookReq.OrgID = orgID
+ installWebhookReq.Body.InsecureSSL = insecureOrgWebhook
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ response, err := apiCli.Organizations.InstallOrgWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var orgHookInfoShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show webhook info",
+ Long: `Show webhook info for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+ showWebhookInfoReq := apiClientOrgs.NewGetOrgWebhookInfoParams()
+ showWebhookInfoReq.OrgID = orgID
+
+ response, err := apiCli.Organizations.GetOrgWebhookInfo(showWebhookInfoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var orgWebhookUninstallCmd = &cobra.Command{
+ Use: "uninstall",
+ Short: "Uninstall webhook",
+ Long: `Uninstall webhook for an organization.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires an organization ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ uninstallWebhookReq := apiClientOrgs.NewUninstallOrgWebhookParams()
+ uninstallWebhookReq.OrgID = orgID
+
+ err = apiCli.Organizations.UninstallOrgWebhook(uninstallWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
var orgAddCmd = &cobra.Command{
Use: "add",
Aliases: []string{"create"},
Short: "Add organization",
Long: `Add a new organization to the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- newOrgReq := params.CreateOrgParams{
- Name: orgName,
- WebhookSecret: orgWebhookSecret,
- CredentialsName: orgCreds,
+ if orgRandomWebhookSecret {
+ secret, err := util.GetRandomString(32)
+ if err != nil {
+ return err
+ }
+ orgWebhookSecret = secret
}
- org, err := cli.CreateOrganization(newOrgReq)
+
+ newOrgReq := apiClientOrgs.NewCreateOrgParams()
+ newOrgReq.Body = params.CreateOrgParams{
+ Name: orgName,
+ WebhookSecret: orgWebhookSecret,
+ CredentialsName: orgCreds,
+ ForgeType: params.EndpointType(forgeType),
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
+ }
+ response, err := apiCli.Organizations.CreateOrg(newOrgReq, authToken)
if err != nil {
return err
}
- formatOneOrganization(org)
+
+ if installOrgWebhook {
+ installWebhookReq := apiClientOrgs.NewInstallOrgWebhookParams()
+ installWebhookReq.OrgID = response.Payload.ID
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ _, err = apiCli.Organizations.InstallOrgWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ }
+
+ getOrgRequest := apiClientOrgs.NewGetOrgParams()
+ getOrgRequest.OrgID = response.Payload.ID
+ org, err := apiCli.Organizations.GetOrg(getOrgRequest, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneOrganization(org.Payload)
+ return nil
+ },
+}
+
+var orgUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update organization",
+ Long: `Update organization credentials or webhook secret.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+		return fmt.Errorf("command requires an organization ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ updateOrgReq := apiClientOrgs.NewUpdateOrgParams()
+ updateOrgReq.Body = params.UpdateEntityParams{
+ WebhookSecret: orgWebhookSecret,
+ CredentialsName: orgCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
+ }
+ updateOrgReq.OrgID = orgID
+ response, err := apiCli.Organizations.UpdateOrg(updateOrgReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneOrganization(response.Payload)
return nil
},
}
@@ -74,16 +257,19 @@ var orgListCmd = &cobra.Command{
Short: "List organizations",
Long: `List all configured organizations that are currently managed.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- orgs, err := cli.ListOrganizations()
+ listOrgsReq := apiClientOrgs.NewListOrgsParams()
+ listOrgsReq.Name = &orgName
+ listOrgsReq.Endpoint = &orgEndpoint
+ response, err := apiCli.Organizations.ListOrgs(listOrgsReq, authToken)
if err != nil {
return err
}
- formatOrganizations(orgs)
+ formatOrganizations(response.Payload)
return nil
},
}
@@ -93,7 +279,7 @@ var orgShowCmd = &cobra.Command{
Short: "Show details for one organization",
Long: `Displays detailed information about a single organization.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -103,11 +289,19 @@ var orgShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- org, err := cli.GetOrganization(args[0])
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
if err != nil {
return err
}
- formatOneOrganization(org)
+
+ showOrgReq := apiClientOrgs.NewGetOrgParams()
+ showOrgReq.OrgID = orgID
+ response, err := apiCli.Organizations.GetOrg(showOrgReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneOrganization(response.Payload)
return nil
},
}
@@ -118,7 +312,7 @@ var orgDeleteCmd = &cobra.Command{
Short: "Removes one organization",
Long: `Delete one organization from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -128,7 +322,16 @@ var orgDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- if err := cli.DeleteOrganization(args[0]); err != nil {
+
+ orgID, err := resolveOrganization(args[0], orgEndpoint)
+ if err != nil {
+ return err
+ }
+
+ deleteOrgReq := apiClientOrgs.NewDeleteOrgParams()
+ deleteOrgReq.OrgID = orgID
+ deleteOrgReq.KeepWebhook = &keepOrgWebhook
+ if err := apiCli.Organizations.DeleteOrg(deleteOrgReq, authToken); err != nil {
return err
}
return nil
@@ -136,41 +339,99 @@ var orgDeleteCmd = &cobra.Command{
}
func init() {
-
orgAddCmd.Flags().StringVar(&orgName, "name", "", "The name of the organization")
+ orgAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
orgAddCmd.Flags().StringVar(&orgWebhookSecret, "webhook-secret", "", "The webhook secret for this organization")
+ orgAddCmd.Flags().StringVar(&forgeType, "forge-type", "", "The forge type of the organization. Supported values: github, gitea.")
orgAddCmd.Flags().StringVar(&orgCreds, "credentials", "", "Credentials name. See credentials list.")
+ orgAddCmd.Flags().BoolVar(&orgRandomWebhookSecret, "random-webhook-secret", false, "Generate a random webhook secret for this organization.")
+ orgAddCmd.Flags().BoolVar(&installOrgWebhook, "install-webhook", false, "Install the webhook as part of the add operation.")
+ orgAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
+ orgAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")
+
+ orgListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ orgListCmd.Flags().StringVarP(&orgName, "name", "n", "", "Exact org name to filter by.")
+ orgListCmd.Flags().StringVarP(&orgEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
orgAddCmd.MarkFlagRequired("credentials") //nolint
orgAddCmd.MarkFlagRequired("name") //nolint
+ orgDeleteCmd.Flags().BoolVar(&keepOrgWebhook, "keep-webhook", false, "Do not delete any existing webhook when removing the organization from GARM.")
+ orgDeleteCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgShowCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgUpdateCmd.Flags().StringVar(&orgWebhookSecret, "webhook-secret", "", "The webhook secret for this organization")
+ orgUpdateCmd.Flags().StringVar(&orgCreds, "credentials", "", "Credentials name. See credentials list.")
+ orgUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ orgUpdateCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookInstallCmd.Flags().BoolVar(&insecureOrgWebhook, "insecure", false, "Ignore self signed certificate errors.")
+ orgWebhookInstallCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookUninstallCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgHookInfoShowCmd.Flags().StringVar(&orgEndpoint, "endpoint", "", "When using the name of the org, the endpoint must be specified when multiple organizations with the same name exist.")
+
+ orgWebhookCmd.AddCommand(
+ orgWebhookInstallCmd,
+ orgWebhookUninstallCmd,
+ orgHookInfoShowCmd,
+ )
+
organizationCmd.AddCommand(
orgListCmd,
orgAddCmd,
orgShowCmd,
orgDeleteCmd,
+ orgUpdateCmd,
+ orgWebhookCmd,
)
rootCmd.AddCommand(organizationCmd)
}
func formatOrganizations(orgs []params.Organization) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(orgs)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Forge type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range orgs {
- t.AppendRow(table.Row{val.ID, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ forgeType := val.Endpoint.EndpointType
+ if forgeType == "" {
+ forgeType = params.GithubEndpointType
+ }
+ row := table.Row{val.ID, val.Name, val.Endpoint.Name, val.CredentialsName, val.GetBalancerType(), forgeType, val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneOrganization(org params.Organization) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(org)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", org.ID})
+ t.AppendRow(table.Row{"Created At", org.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", org.UpdatedAt})
t.AppendRow(table.Row{"Name", org.Name})
+ t.AppendRow(table.Row{"Endpoint", org.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", org.GetBalancerType()})
t.AppendRow(table.Row{"Credentials", org.CredentialsName})
t.AppendRow(table.Row{"Pool manager running", org.PoolManagerStatus.IsRunning})
if !org.PoolManagerStatus.IsRunning {
@@ -181,9 +442,14 @@ func formatOneOrganization(org params.Organization) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+ if len(org.Events) > 0 {
+ for _, event := range org.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
diff --git a/cmd/garm-cli/cmd/pool.go b/cmd/garm-cli/cmd/pool.go
index 8e1e994b..5b8cadf3 100644
--- a/cmd/garm-cli/cmd/pool.go
+++ b/cmd/garm-cli/cmd/pool.go
@@ -20,11 +20,16 @@ import (
"os"
"strings"
- "github.com/cloudbase/garm/params"
-
"github.com/jedib0t/go-pretty/v6/table"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientPools "github.com/cloudbase/garm/client/pools"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
@@ -46,8 +51,13 @@ var (
poolExtraSpecs string
poolAll bool
poolGitHubRunnerGroup string
+ priority uint
)
+type poolsPayloadGetter interface {
+ GetPayload() params.Pools
+}
+
// runnerCmd represents the runner command
var poolCmd = &cobra.Command{
Use: "pool",
@@ -76,9 +86,9 @@ Example:
garm-cli pool list --org=5493e51f-3170-4ce3-9f05-3fe690fc6ec6
List pools from one enterprise:
- garm-cli pool list --org=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
+ garm-cli pool list --enterprise=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
- List all pools from all repos and orgs:
+ List all pools from all repos, orgs and enterprises:
garm-cli pool list --all
`,
@@ -88,22 +98,38 @@ Example:
return errNeedsInitError
}
- var pools []params.Pool
+ var response poolsPayloadGetter
var err error
switch len(args) {
case 0:
if cmd.Flags().Changed("repo") {
- pools, err = cli.ListRepoPools(poolRepository)
+ poolRepository, err = resolveRepository(poolRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ listRepoPoolsReq := apiClientRepos.NewListRepoPoolsParams()
+ listRepoPoolsReq.RepoID = poolRepository
+ response, err = apiCli.Repositories.ListRepoPools(listRepoPoolsReq, authToken)
} else if cmd.Flags().Changed("org") {
- pools, err = cli.ListOrgPools(poolOrganization)
+ poolOrganization, err = resolveOrganization(poolOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ listOrgPoolsReq := apiClientOrgs.NewListOrgPoolsParams()
+ listOrgPoolsReq.OrgID = poolOrganization
+ response, err = apiCli.Organizations.ListOrgPools(listOrgPoolsReq, authToken)
} else if cmd.Flags().Changed("enterprise") {
- pools, err = cli.ListEnterprisePools(poolEnterprise)
- } else if cmd.Flags().Changed("all") {
- pools, err = cli.ListAllPools()
+ poolEnterprise, err = resolveEnterprise(poolEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ listEnterprisePoolsReq := apiClientEnterprises.NewListEnterprisePoolsParams()
+ listEnterprisePoolsReq.EnterpriseID = poolEnterprise
+ response, err = apiCli.Enterprises.ListEnterprisePools(listEnterprisePoolsReq, authToken)
} else {
- cmd.Help() //nolint
- os.Exit(0)
+ listPoolsReq := apiClientPools.NewListPoolsParams()
+ response, err = apiCli.Pools.ListPools(listPoolsReq, authToken)
}
default:
cmd.Help() //nolint
@@ -113,7 +139,7 @@ Example:
if err != nil {
return err
}
- formatPools(pools)
+ formatPools(response.GetPayload())
return nil
},
}
@@ -123,7 +149,7 @@ var poolShowCmd = &cobra.Command{
Short: "Show details for a runner",
Long: `Displays a detailed view of a single runner.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -136,11 +162,13 @@ var poolShowCmd = &cobra.Command{
return fmt.Errorf("too many arguments")
}
- pool, err := cli.GetPoolByID(args[0])
+ getPoolReq := apiClientPools.NewGetPoolParams()
+ getPoolReq.PoolID = args[0]
+ response, err := apiCli.Pools.GetPool(getPoolReq, authToken)
if err != nil {
return err
}
- formatOnePool(pool)
+ formatOnePool(response.Payload)
return nil
},
}
@@ -151,7 +179,7 @@ var poolDeleteCmd = &cobra.Command{
Short: "Delete pool by ID",
Long: `Delete one pool by referencing it's ID, regardless of repo or org.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -164,20 +192,26 @@ var poolDeleteCmd = &cobra.Command{
return fmt.Errorf("too many arguments")
}
- if err := cli.DeletePoolByID(args[0]); err != nil {
+ deletePoolReq := apiClientPools.NewDeletePoolParams()
+ deletePoolReq.PoolID = args[0]
+ if err := apiCli.Pools.DeletePool(deletePoolReq, authToken); err != nil {
return err
}
return nil
},
}
+type poolPayloadGetter interface {
+ GetPayload() params.Pool
+}
+
var poolAddCmd = &cobra.Command{
Use: "add",
Aliases: []string{"create"},
Short: "Add pool",
Long: `Add a new pool to a repository or organization.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(cmd *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -192,12 +226,13 @@ var poolAddCmd = &cobra.Command{
MinIdleRunners: poolMinIdleRunners,
Image: poolImage,
Flavor: poolFlavor,
- OSType: params.OSType(poolOSType),
- OSArch: params.OSArch(poolOSArch),
+ OSType: commonParams.OSType(poolOSType),
+ OSArch: commonParams.OSArch(poolOSArch),
Tags: tags,
Enabled: poolEnabled,
RunnerBootstrapTimeout: poolRunnerBootstrapTimeout,
GitHubRunnerGroup: poolGitHubRunnerGroup,
+ Priority: priority,
}
if cmd.Flags().Changed("extra-specs") {
@@ -220,15 +255,35 @@ var poolAddCmd = &cobra.Command{
return err
}
- var pool params.Pool
var err error
-
+ var response poolPayloadGetter
if cmd.Flags().Changed("repo") {
- pool, err = cli.CreateRepoPool(poolRepository, newPoolParams)
+ poolRepository, err = resolveRepository(poolRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ newRepoPoolReq := apiClientRepos.NewCreateRepoPoolParams()
+ newRepoPoolReq.RepoID = poolRepository
+ newRepoPoolReq.Body = newPoolParams
+ response, err = apiCli.Repositories.CreateRepoPool(newRepoPoolReq, authToken)
} else if cmd.Flags().Changed("org") {
- pool, err = cli.CreateOrgPool(poolOrganization, newPoolParams)
+ poolOrganization, err = resolveOrganization(poolOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ newOrgPoolReq := apiClientOrgs.NewCreateOrgPoolParams()
+ newOrgPoolReq.OrgID = poolOrganization
+ newOrgPoolReq.Body = newPoolParams
+ response, err = apiCli.Organizations.CreateOrgPool(newOrgPoolReq, authToken)
} else if cmd.Flags().Changed("enterprise") {
- pool, err = cli.CreateEnterprisePool(poolEnterprise, newPoolParams)
+ poolEnterprise, err = resolveEnterprise(poolEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ newEnterprisePoolReq := apiClientEnterprises.NewCreateEnterprisePoolParams()
+ newEnterprisePoolReq.EnterpriseID = poolEnterprise
+ newEnterprisePoolReq.Body = newPoolParams
+ response, err = apiCli.Enterprises.CreateEnterprisePool(newEnterprisePoolReq, authToken)
} else {
cmd.Help() //nolint
os.Exit(0)
@@ -237,7 +292,8 @@ var poolAddCmd = &cobra.Command{
if err != nil {
return err
}
- formatOnePool(pool)
+
+ formatOnePool(response.GetPayload())
return nil
},
}
@@ -248,7 +304,7 @@ var poolUpdateCmd = &cobra.Command{
Long: `Updates pool characteristics.
This command updates the pool characteristics. Runners already created prior to updating
-the pool, will not be recreated. IF they no longer suit your needs, you will need to
+the pool, will not be recreated. If they no longer suit your needs, you will need to
explicitly remove them using the runner delete command.
`,
SilenceUsage: true,
@@ -265,6 +321,7 @@ explicitly remove them using the runner delete command.
return fmt.Errorf("too many arguments")
}
+ updatePoolReq := apiClientPools.NewUpdatePoolParams()
poolUpdateParams := params.UpdatePoolParams{}
if cmd.Flags().Changed("image") {
@@ -280,16 +337,19 @@ explicitly remove them using the runner delete command.
}
if cmd.Flags().Changed("os-type") {
- poolUpdateParams.OSType = params.OSType(poolOSType)
+ poolUpdateParams.OSType = commonParams.OSType(poolOSType)
}
if cmd.Flags().Changed("os-arch") {
- poolUpdateParams.OSArch = params.OSArch(poolOSArch)
+ poolUpdateParams.OSArch = commonParams.OSArch(poolOSArch)
}
if cmd.Flags().Changed("max-runners") {
poolUpdateParams.MaxRunners = &poolMaxRunners
}
+ if cmd.Flags().Changed("priority") {
+ poolUpdateParams.Priority = &priority
+ }
if cmd.Flags().Changed("min-idle-runners") {
poolUpdateParams.MinIdleRunners = &poolMinIdleRunners
@@ -329,24 +389,31 @@ explicitly remove them using the runner delete command.
poolUpdateParams.ExtraSpecs = data
}
- pool, err := cli.UpdatePoolByID(args[0], poolUpdateParams)
+ updatePoolReq.PoolID = args[0]
+ updatePoolReq.Body = poolUpdateParams
+ response, err := apiCli.Pools.UpdatePool(updatePoolReq, authToken)
if err != nil {
return err
}
- formatOnePool(pool)
+ formatOnePool(response.Payload)
return nil
},
}
func init() {
poolListCmd.Flags().StringVarP(&poolRepository, "repo", "r", "", "List all pools within this repository.")
- poolListCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "List all pools withing this organization.")
- poolListCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "List all pools withing this enterprise.")
- poolListCmd.Flags().BoolVarP(&poolAll, "all", "a", false, "List all pools, regardless of org or repo.")
- poolListCmd.MarkFlagsMutuallyExclusive("repo", "org", "all", "enterprise")
+ poolListCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "List all pools within this organization.")
+ poolListCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "List all pools within this enterprise.")
+ poolListCmd.Flags().BoolVarP(&poolAll, "all", "a", true, "List all pools, regardless of org or repo.")
+ poolListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ poolListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+
+ poolListCmd.Flags().MarkDeprecated("all", "all pools are listed by default in the absence of --repo, --org or --enterprise.")
+ poolListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise", "all")
poolUpdateCmd.Flags().StringVar(&poolImage, "image", "", "The provider-specific image name to use for runners in this pool.")
+ poolUpdateCmd.Flags().UintVar(&priority, "priority", 0, "When multiple pools match the same labels, priority dictates the order by which they are returned, in descending order.")
poolUpdateCmd.Flags().StringVar(&poolFlavor, "flavor", "", "The flavor to use for this runner.")
poolUpdateCmd.Flags().StringVar(&poolTags, "tags", "", "A comma separated list of tags to assign to this runner.")
poolUpdateCmd.Flags().StringVar(&poolOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
@@ -362,6 +429,7 @@ func init() {
poolUpdateCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
poolAddCmd.Flags().StringVar(&poolProvider, "provider-name", "", "The name of the provider where runners will be created.")
+ poolAddCmd.Flags().UintVar(&priority, "priority", 0, "When multiple pools match the same labels, priority dictates the order by which they are returned, in descending order.")
poolAddCmd.Flags().StringVar(&poolImage, "image", "", "The provider-specific image name to use for runners in this pool.")
poolAddCmd.Flags().StringVar(&poolFlavor, "flavor", "", "The flavor to use for this runner.")
poolAddCmd.Flags().StringVar(&poolRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this pool.")
@@ -375,14 +443,16 @@ func init() {
poolAddCmd.Flags().UintVar(&poolRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join Github.")
poolAddCmd.Flags().UintVar(&poolMinIdleRunners, "min-idle-runners", 1, "Attempt to maintain a minimum of idle self-hosted runners of this type.")
poolAddCmd.Flags().BoolVar(&poolEnabled, "enabled", false, "Enable this pool.")
+ poolAddCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+
poolAddCmd.MarkFlagRequired("provider-name") //nolint
poolAddCmd.MarkFlagRequired("image") //nolint
poolAddCmd.MarkFlagRequired("flavor") //nolint
poolAddCmd.MarkFlagRequired("tags") //nolint
poolAddCmd.Flags().StringVarP(&poolRepository, "repo", "r", "", "Add the new pool within this repository.")
- poolAddCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "Add the new pool withing this organization.")
- poolAddCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "Add the new pool withing this enterprise.")
+ poolAddCmd.Flags().StringVarP(&poolOrganization, "org", "o", "", "Add the new pool within this organization.")
+ poolAddCmd.Flags().StringVarP(&poolEnterprise, "enterprise", "e", "", "Add the new pool within this enterprise.")
poolAddCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
poolAddCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
@@ -400,7 +470,7 @@ func init() {
func extraSpecsFromFile(specsFile string) (json.RawMessage, error) {
data, err := os.ReadFile(specsFile)
if err != nil {
- return nil, errors.Wrap(err, "opening specs file")
+ return nil, fmt.Errorf("error opening specs file: %w", err)
}
return asRawMessage(data)
}
@@ -410,21 +480,31 @@ func asRawMessage(data []byte) (json.RawMessage, error) {
// have a valid json.
var unmarshaled interface{}
if err := json.Unmarshal(data, &unmarshaled); err != nil {
- return nil, errors.Wrap(err, "decoding extra specs")
+ return nil, fmt.Errorf("error decoding extra specs: %w", err)
}
- var asRawJson json.RawMessage
+ var asRawJSON json.RawMessage
var err error
- asRawJson, err = json.Marshal(unmarshaled)
+ asRawJSON, err = json.Marshal(unmarshaled)
if err != nil {
- return nil, errors.Wrap(err, "marshaling json")
+ return nil, fmt.Errorf("error marshaling json: %w", err)
}
- return asRawJson, nil
+ return asRawJSON, nil
}
func formatPools(pools []params.Pool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(pools)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Level", "Enabled", "Runner Prefix"}
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 2, WidthMax: 40},
+ })
+ header := table.Row{"ID", "Image", "Flavor", "Tags", "Belongs to", "Endpoint", "Forge Type", "Enabled"}
+ if long {
+ header = append(header, "Level", "Created At", "Updated at", "Runner Prefix", "Priority")
+ }
t.AppendHeader(header)
for _, pool := range pools {
@@ -435,23 +515,32 @@ func formatPools(pools []params.Pool) {
var belongsTo string
var level string
- if pool.RepoID != "" && pool.RepoName != "" {
+ switch {
+ case pool.RepoID != "" && pool.RepoName != "":
belongsTo = pool.RepoName
- level = "repo"
- } else if pool.OrgID != "" && pool.OrgName != "" {
+ level = entityTypeRepo
+ case pool.OrgID != "" && pool.OrgName != "":
belongsTo = pool.OrgName
- level = "org"
- } else if pool.EnterpriseID != "" && pool.EnterpriseName != "" {
+ level = entityTypeOrg
+ case pool.EnterpriseID != "" && pool.EnterpriseName != "":
belongsTo = pool.EnterpriseName
- level = "enterprise"
+ level = entityTypeEnterprise
}
- t.AppendRow(table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, level, pool.Enabled, pool.GetRunnerPrefix()})
+ row := table.Row{pool.ID, pool.Image, pool.Flavor, strings.Join(tags, " "), belongsTo, pool.Endpoint.Name, pool.Endpoint.EndpointType, pool.Enabled}
+ if long {
+ row = append(row, level, pool.CreatedAt, pool.UpdatedAt, pool.GetRunnerPrefix(), pool.Priority)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOnePool(pool params.Pool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(pool)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
@@ -465,20 +554,24 @@ func formatOnePool(pool params.Pool) {
var belongsTo string
var level string
- if pool.RepoID != "" && pool.RepoName != "" {
+ switch {
+ case pool.RepoID != "" && pool.RepoName != "":
belongsTo = pool.RepoName
- level = "repo"
- } else if pool.OrgID != "" && pool.OrgName != "" {
+ level = entityTypeRepo
+ case pool.OrgID != "" && pool.OrgName != "":
belongsTo = pool.OrgName
- level = "org"
- } else if pool.EnterpriseID != "" && pool.EnterpriseName != "" {
+ level = entityTypeOrg
+ case pool.EnterpriseID != "" && pool.EnterpriseName != "":
belongsTo = pool.EnterpriseName
- level = "enterprise"
+ level = entityTypeEnterprise
}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", pool.ID})
+ t.AppendRow(table.Row{"Created At", pool.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", pool.UpdatedAt})
t.AppendRow(table.Row{"Provider Name", pool.ProviderName})
+ t.AppendRow(table.Row{"Priority", pool.Priority})
t.AppendRow(table.Row{"Image", pool.Image})
t.AppendRow(table.Row{"Flavor", pool.Flavor})
t.AppendRow(table.Row{"OS Type", pool.OSType})
@@ -492,7 +585,9 @@ func formatOnePool(pool params.Pool) {
t.AppendRow(table.Row{"Enabled", pool.Enabled})
t.AppendRow(table.Row{"Runner Prefix", pool.GetRunnerPrefix()})
t.AppendRow(table.Row{"Extra specs", string(pool.ExtraSpecs)})
- t.AppendRow(table.Row{"GitHub Runner Group", string(pool.GitHubRunnerGroup)})
+ t.AppendRow(table.Row{"GitHub Runner Group", pool.GitHubRunnerGroup})
+ t.AppendRow(table.Row{"Forge Type", pool.Endpoint.EndpointType})
+ t.AppendRow(table.Row{"Endpoint Name", pool.Endpoint.Name})
if len(pool.Instances) > 0 {
for _, instance := range pool.Instances {
diff --git a/cmd/garm-cli/cmd/profile.go b/cmd/garm-cli/cmd/profile.go
index 29c74323..7e3e4d5b 100644
--- a/cmd/garm-cli/cmd/profile.go
+++ b/cmd/garm-cli/cmd/profile.go
@@ -18,12 +18,13 @@ import (
"fmt"
"strings"
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ apiClientLogin "github.com/cloudbase/garm/client/login"
"github.com/cloudbase/garm/cmd/garm-cli/common"
"github.com/cloudbase/garm/cmd/garm-cli/config"
"github.com/cloudbase/garm/params"
-
- "github.com/jedib0t/go-pretty/v6/table"
- "github.com/spf13/cobra"
)
var (
@@ -54,7 +55,7 @@ This command will list all currently defined profiles in the local configuration
file of the garm client.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -75,7 +76,7 @@ var profileDeleteCmd = &cobra.Command{
Short: "Delete profile",
Long: `Delete a profile from the local CLI configuration.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -100,7 +101,7 @@ var poolSwitchCmd = &cobra.Command{
Short: "Switch to a different profile",
Long: `Switch the CLI to a different profile.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -131,7 +132,7 @@ var profileAddCmd = &cobra.Command{
Short: "Add profile",
Long: `Create a profile for a new garm installation.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if cfg != nil {
if cfg.HasManager(loginProfileName) {
return fmt.Errorf("a manager with name %s already exists in your local config", loginProfileName)
@@ -143,12 +144,15 @@ var profileAddCmd = &cobra.Command{
}
url := strings.TrimSuffix(loginURL, "/")
- loginParams := params.PasswordLoginParams{
+
+ initAPIClient(url, "")
+
+ newLoginParamsReq := apiClientLogin.NewLoginParams()
+ newLoginParamsReq.Body = params.PasswordLoginParams{
Username: loginUserName,
Password: loginPassword,
}
-
- resp, err := cli.Login(url, loginParams)
+ resp, err := apiCli.Login.Login(newLoginParamsReq, authToken)
if err != nil {
return err
}
@@ -156,7 +160,7 @@ var profileAddCmd = &cobra.Command{
cfg.Managers = append(cfg.Managers, config.Manager{
Name: loginProfileName,
BaseURL: url,
- Token: resp,
+ Token: resp.Payload.Token,
})
cfg.ActiveManager = loginProfileName
@@ -176,7 +180,7 @@ This command will refresh the bearer token associated with an already defined ga
installation, by performing a login.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
@@ -190,16 +194,17 @@ installation, by performing a login.
return err
}
- loginParams := params.PasswordLoginParams{
+ newLoginParamsReq := apiClientLogin.NewLoginParams()
+ newLoginParamsReq.Body = params.PasswordLoginParams{
Username: loginUserName,
Password: loginPassword,
}
- resp, err := cli.Login(mgr.BaseURL, loginParams)
+ resp, err := apiCli.Login.Login(newLoginParamsReq, authToken)
if err != nil {
return err
}
- if err := cfg.SetManagerToken(mgr.Name, resp); err != nil {
+ if err := cfg.SetManagerToken(mgr.Name, resp.Payload.Token); err != nil {
return fmt.Errorf("error saving new token: %s", err)
}
@@ -234,6 +239,10 @@ func init() {
}
func formatProfiles(profiles []config.Manager) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(profiles)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Name", "Base URL"}
t.AppendHeader(header)
@@ -259,7 +268,7 @@ func promptUnsetLoginVariables() error {
}
if loginPassword == "" {
- loginPassword, err = common.PromptPassword("Password")
+ loginPassword, err = common.PromptPassword("Password", "")
if err != nil {
return err
}
diff --git a/cmd/garm-cli/cmd/provider.go b/cmd/garm-cli/cmd/provider.go
index 40840abf..b4f05401 100644
--- a/cmd/garm-cli/cmd/provider.go
+++ b/cmd/garm-cli/cmd/provider.go
@@ -17,10 +17,12 @@ package cmd
import (
"fmt"
- "github.com/cloudbase/garm/params"
-
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientProviders "github.com/cloudbase/garm/client/providers"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
// providerCmd represents the provider command
@@ -44,16 +46,17 @@ func init() {
Short: "List all configured providers",
Long: `List all cloud providers configured with the service.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, _ []string) error {
if needsInit {
return errNeedsInitError
}
- providers, err := cli.ListProviders()
+ listProvidersReq := apiClientProviders.NewListProvidersParams()
+ response, err := apiCli.Providers.ListProviders(listProvidersReq, authToken)
if err != nil {
return err
}
- formatProviders(providers)
+ formatProviders(response.Payload)
return nil
},
})
@@ -62,6 +65,10 @@ func init() {
}
func formatProviders(providers []params.Provider) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(providers)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Name", "Description", "Type"}
t.AppendHeader(header)
diff --git a/cmd/garm-cli/cmd/repository.go b/cmd/garm-cli/cmd/repository.go
index 01324fbc..cca1a7fe 100644
--- a/cmd/garm-cli/cmd/repository.go
+++ b/cmd/garm-cli/cmd/repository.go
@@ -16,18 +16,28 @@ package cmd
import (
"fmt"
-
- "github.com/cloudbase/garm/params"
+ "strings"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ "github.com/cloudbase/garm-provider-common/util"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- repoOwner string
- repoName string
- repoWebhookSecret string
- repoCreds string
+ repoOwner string
+ repoName string
+ repoEndpoint string
+ repoWebhookSecret string
+ repoCreds string
+ forgeType string
+ randomWebhookSecret bool
+ insecureRepoWebhook bool
+ keepRepoWebhook bool
+ installRepoWebhook bool
)
// repositoryCmd represents the repository command
@@ -44,58 +54,20 @@ repository for which the garm maintains pools of self hosted runners.`,
Run: nil,
}
-var repoAddCmd = &cobra.Command{
- Use: "add",
- Aliases: []string{"create"},
- Short: "Add repository",
- Long: `Add a new repository to the manager.`,
+var repoWebhookCmd = &cobra.Command{
+ Use: "webhook",
+ Short: "Manage repository webhooks",
+ Long: `Manage repository webhooks.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- if needsInit {
- return errNeedsInitError
- }
-
- newRepoReq := params.CreateRepoParams{
- Owner: repoOwner,
- Name: repoName,
- WebhookSecret: repoWebhookSecret,
- CredentialsName: repoCreds,
- }
- repo, err := cli.CreateRepository(newRepoReq)
- if err != nil {
- return err
- }
- formatOneRepository(repo)
- return nil
- },
+ Run: nil,
}
-var repoListCmd = &cobra.Command{
- Use: "list",
- Aliases: []string{"ls"},
- Short: "List repositories",
- Long: `List all configured respositories that are currently managed.`,
+var repoWebhookInstallCmd = &cobra.Command{
+ Use: "install",
+ Short: "Install webhook",
+ Long: `Install webhook for a repository.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- if needsInit {
- return errNeedsInitError
- }
-
- repos, err := cli.ListRepositories()
- if err != nil {
- return err
- }
- formatRepositories(repos)
- return nil
- },
-}
-
-var repoShowCmd = &cobra.Command{
- Use: "show",
- Short: "Show details for one repository",
- Long: `Displays detailed information about a single repository.`,
- SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -105,11 +77,238 @@ var repoShowCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- repo, err := cli.GetRepository(args[0])
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
if err != nil {
return err
}
- formatOneRepository(repo)
+
+ installWebhookReq := apiClientRepos.NewInstallRepoWebhookParams()
+ installWebhookReq.RepoID = repoID
+ installWebhookReq.Body.InsecureSSL = insecureRepoWebhook
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ response, err := apiCli.Repositories.InstallRepoWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var repoHookInfoShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show webhook info",
+ Long: `Show webhook info for a repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ showWebhookInfoReq := apiClientRepos.NewGetRepoWebhookInfoParams()
+ showWebhookInfoReq.RepoID = repoID
+
+ response, err := apiCli.Repositories.GetRepoWebhookInfo(showWebhookInfoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneHookInfo(response.Payload)
+ return nil
+ },
+}
+
+var repoWebhookUninstallCmd = &cobra.Command{
+ Use: "uninstall",
+ Short: "Uninstall webhook",
+ Long: `Uninstall webhook for a repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ uninstallWebhookReq := apiClientRepos.NewUninstallRepoWebhookParams()
+ uninstallWebhookReq.RepoID = repoID
+
+ err = apiCli.Repositories.UninstallRepoWebhook(uninstallWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var repoAddCmd = &cobra.Command{
+ Use: "add",
+ Aliases: []string{"create"},
+ Short: "Add repository",
+ Long: `Add a new repository to the manager.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if randomWebhookSecret {
+ secret, err := util.GetRandomString(32)
+ if err != nil {
+ return err
+ }
+ repoWebhookSecret = secret
+ }
+
+ newRepoReq := apiClientRepos.NewCreateRepoParams()
+ newRepoReq.Body = params.CreateRepoParams{
+ Owner: repoOwner,
+ Name: repoName,
+ WebhookSecret: repoWebhookSecret,
+ CredentialsName: repoCreds,
+ ForgeType: params.EndpointType(forgeType),
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
+ }
+ response, err := apiCli.Repositories.CreateRepo(newRepoReq, authToken)
+ if err != nil {
+ return err
+ }
+
+ if installRepoWebhook {
+ installWebhookReq := apiClientRepos.NewInstallRepoWebhookParams()
+ installWebhookReq.RepoID = response.Payload.ID
+ installWebhookReq.Body.WebhookEndpointType = params.WebhookEndpointDirect
+
+ _, err := apiCli.Repositories.InstallRepoWebhook(installWebhookReq, authToken)
+ if err != nil {
+ return err
+ }
+ }
+
+ getRepoReq := apiClientRepos.NewGetRepoParams()
+ getRepoReq.RepoID = response.Payload.ID
+ repo, err := apiCli.Repositories.GetRepo(getRepoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneRepository(repo.Payload)
+ return nil
+ },
+}
+
+var repoListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List repositories",
+ Long: `List all configured repositories that are currently managed.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ listReposReq := apiClientRepos.NewListReposParams()
+ listReposReq.Name = &repoName
+ listReposReq.Owner = &repoOwner
+ listReposReq.Endpoint = &repoEndpoint
+ response, err := apiCli.Repositories.ListRepos(listReposReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatRepositories(response.Payload)
+ return nil
+ },
+}
+
+var repoUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update repository",
+ Long: `Update repository credentials or webhook secret.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("command requires a repo ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ updateReposReq := apiClientRepos.NewUpdateRepoParams()
+ updateReposReq.Body = params.UpdateEntityParams{
+ WebhookSecret: repoWebhookSecret,
+ CredentialsName: repoCreds,
+ PoolBalancerType: params.PoolBalancerType(poolBalancerType),
+ }
+ updateReposReq.RepoID = repoID
+
+ response, err := apiCli.Repositories.UpdateRepo(updateReposReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneRepository(response.Payload)
+ return nil
+ },
+}
+
+var repoShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show details for one repository",
+ Long: `Displays detailed information about a single repository.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+ if len(args) == 0 {
+ return fmt.Errorf("requires a repository ID")
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ showRepoReq := apiClientRepos.NewGetRepoParams()
+ showRepoReq.RepoID = repoID
+ response, err := apiCli.Repositories.GetRepo(showRepoReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneRepository(response.Payload)
return nil
},
}
@@ -120,7 +319,7 @@ var repoDeleteCmd = &cobra.Command{
Short: "Removes one repository",
Long: `Delete one repository from the manager.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -130,7 +329,16 @@ var repoDeleteCmd = &cobra.Command{
if len(args) > 1 {
return fmt.Errorf("too many arguments")
}
- if err := cli.DeleteRepository(args[0]); err != nil {
+
+ repoID, err := resolveRepository(args[0], repoEndpoint)
+ if err != nil {
+ return err
+ }
+
+ deleteRepoReq := apiClientRepos.NewDeleteRepoParams()
+ deleteRepoReq.RepoID = repoID
+ deleteRepoReq.KeepWebhook = &keepRepoWebhook
+ if err := apiCli.Repositories.DeleteRepo(deleteRepoReq, authToken); err != nil {
return err
}
return nil
@@ -138,45 +346,104 @@ var repoDeleteCmd = &cobra.Command{
}
func init() {
-
repoAddCmd.Flags().StringVar(&repoOwner, "owner", "", "The owner of this repository")
+ repoAddCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", string(params.PoolBalancerTypeRoundRobin), "The balancing strategy to use when creating runners in pools matching requested labels.")
repoAddCmd.Flags().StringVar(&repoName, "name", "", "The name of the repository")
+ repoAddCmd.Flags().StringVar(&forgeType, "forge-type", "", "The forge type of the repository. Supported values: github, gitea.")
repoAddCmd.Flags().StringVar(&repoWebhookSecret, "webhook-secret", "", "The webhook secret for this repository")
repoAddCmd.Flags().StringVar(&repoCreds, "credentials", "", "Credentials name. See credentials list.")
+ repoAddCmd.Flags().BoolVar(&randomWebhookSecret, "random-webhook-secret", false, "Generate a random webhook secret for this repository.")
+ repoAddCmd.Flags().BoolVar(&installRepoWebhook, "install-webhook", false, "Install the webhook as part of the add operation.")
+ repoAddCmd.MarkFlagsMutuallyExclusive("webhook-secret", "random-webhook-secret")
+ repoAddCmd.MarkFlagsOneRequired("webhook-secret", "random-webhook-secret")
+
+ repoListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
+ repoListCmd.Flags().StringVarP(&repoName, "name", "n", "", "Exact repo name to filter by.")
+ repoListCmd.Flags().StringVarP(&repoOwner, "owner", "o", "", "Exact repo owner to filter by.")
+ repoListCmd.Flags().StringVarP(&repoEndpoint, "endpoint", "e", "", "Exact endpoint name to filter by.")
+
repoAddCmd.MarkFlagRequired("credentials") //nolint
repoAddCmd.MarkFlagRequired("owner") //nolint
repoAddCmd.MarkFlagRequired("name") //nolint
+ repoDeleteCmd.Flags().BoolVar(&keepRepoWebhook, "keep-webhook", false, "Do not delete any existing webhook when removing the repo from GARM.")
+ repoDeleteCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoShowCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoUpdateCmd.Flags().StringVar(&repoWebhookSecret, "webhook-secret", "", "The webhook secret for this repository. If you update this secret, you will have to manually update the secret in GitHub as well.")
+ repoUpdateCmd.Flags().StringVar(&repoCreds, "credentials", "", "Credentials name. See credentials list.")
+ repoUpdateCmd.Flags().StringVar(&poolBalancerType, "pool-balancer-type", "", "The balancing strategy to use when creating runners in pools matching requested labels.")
+ repoUpdateCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookInstallCmd.Flags().BoolVar(&insecureRepoWebhook, "insecure", false, "Ignore self signed certificate errors.")
+ repoWebhookInstallCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookUninstallCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoHookInfoShowCmd.Flags().StringVar(&repoEndpoint, "endpoint", "", "When using the name of the repo, the endpoint must be specified when multiple repositories with the same name exist.")
+
+ repoWebhookCmd.AddCommand(
+ repoWebhookInstallCmd,
+ repoWebhookUninstallCmd,
+ repoHookInfoShowCmd,
+ )
+
repositoryCmd.AddCommand(
repoListCmd,
repoAddCmd,
repoShowCmd,
repoDeleteCmd,
+ repoUpdateCmd,
+ repoWebhookCmd,
)
rootCmd.AddCommand(repositoryCmd)
}
func formatRepositories(repos []params.Repository) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(repos)
+ return
+ }
t := table.NewWriter()
- header := table.Row{"ID", "Owner", "Name", "Credentials name", "Pool mgr running"}
+ header := table.Row{"ID", "Owner", "Name", "Endpoint", "Credentials name", "Pool Balancer Type", "Forge type", "Pool mgr running"}
+ if long {
+ header = append(header, "Created At", "Updated At")
+ }
t.AppendHeader(header)
for _, val := range repos {
- t.AppendRow(table.Row{val.ID, val.Owner, val.Name, val.CredentialsName, val.PoolManagerStatus.IsRunning})
+ forgeType := val.Endpoint.EndpointType
+ if forgeType == "" {
+ forgeType = params.GithubEndpointType
+ }
+ row := table.Row{val.ID, val.Owner, val.Name, val.Endpoint.Name, val.GetCredentialsName(), val.GetBalancerType(), forgeType, val.PoolManagerStatus.IsRunning}
+ if long {
+ row = append(row, val.CreatedAt, val.UpdatedAt)
+ }
+ t.AppendRow(row)
t.AppendSeparator()
}
fmt.Println(t.Render())
}
func formatOneRepository(repo params.Repository) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(repo)
+ return
+ }
t := table.NewWriter()
rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", repo.ID})
+ t.AppendRow(table.Row{"Created At", repo.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", repo.UpdatedAt})
t.AppendRow(table.Row{"Owner", repo.Owner})
t.AppendRow(table.Row{"Name", repo.Name})
- t.AppendRow(table.Row{"Credentials", repo.CredentialsName})
+ t.AppendRow(table.Row{"Endpoint", repo.Endpoint.Name})
+ t.AppendRow(table.Row{"Pool balancer type", repo.GetBalancerType()})
+ t.AppendRow(table.Row{"Credentials", repo.GetCredentialsName()})
t.AppendRow(table.Row{"Pool manager running", repo.PoolManagerStatus.IsRunning})
if !repo.PoolManagerStatus.IsRunning {
t.AppendRow(table.Row{"Failure reason", repo.PoolManagerStatus.FailureReason})
@@ -187,9 +454,16 @@ func formatOneRepository(repo params.Repository) {
t.AppendRow(table.Row{"Pools", pool.ID}, rowConfigAutoMerge)
}
}
+
+ if len(repo.Events) > 0 {
+ for _, event := range repo.Events {
+ t.AppendRow(table.Row{"Events", fmt.Sprintf("%s %s: %s", event.CreatedAt.Format("2006-01-02T15:04:05"), strings.ToUpper(string(event.EventLevel)), event.Message)}, rowConfigAutoMerge)
+ }
+ }
+
t.SetColumnConfigs([]table.ColumnConfig{
{Number: 1, AutoMerge: true},
- {Number: 2, AutoMerge: false},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
})
fmt.Println(t.Render())
diff --git a/cmd/garm-cli/cmd/root.go b/cmd/garm-cli/cmd/root.go
index 716b8038..df3ef11b 100644
--- a/cmd/garm-cli/cmd/root.go
+++ b/cmd/garm-cli/cmd/root.go
@@ -15,25 +15,38 @@
package cmd
import (
+ "encoding/json"
"fmt"
+ "net/url"
"os"
- "github.com/cloudbase/garm/cmd/garm-cli/client"
- "github.com/cloudbase/garm/cmd/garm-cli/config"
-
+ "github.com/go-openapi/runtime"
+ openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClient "github.com/cloudbase/garm/client"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/cmd/garm-cli/config"
+ "github.com/cloudbase/garm/params"
)
-var Version string
+const (
+ entityTypeOrg string = "org"
+ entityTypeRepo string = "repo"
+ entityTypeEnterprise string = "enterprise"
+)
var (
cfg *config.Config
mgr config.Manager
- cli *client.Client
- active string
+ apiCli *apiClient.GarmAPI
+ authToken runtime.ClientAuthInfoWriter
needsInit bool
debug bool
- errNeedsInitError = fmt.Errorf("please log into a garm installation first")
+ poolBalancerType string
+ outputFormat common.OutputFormat = common.OutputFormatTable
+ errNeedsInitError = fmt.Errorf("please log into a garm installation first")
)
// rootCmd represents the base command when called without any subcommands
@@ -47,6 +60,8 @@ var rootCmd = &cobra.Command{
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug on all API calls")
+ rootCmd.PersistentFlags().Var(&outputFormat, "format", "Output format (table, json)")
+
cobra.OnInitialize(initConfig)
err := rootCmd.Execute()
@@ -55,6 +70,28 @@ func Execute() {
}
}
+func initAPIClient(baseURL, token string) {
+ baseURLParsed, err := url.Parse(baseURL)
+ if err != nil {
+ fmt.Printf("Failed to parse base url %s: %s", baseURL, err)
+ os.Exit(1)
+ }
+ apiPath, err := url.JoinPath(baseURLParsed.Path, apiClient.DefaultBasePath)
+ if err != nil {
+ fmt.Printf("Failed to join base url path %s with %s: %s", baseURLParsed.Path, apiClient.DefaultBasePath, err)
+ os.Exit(1)
+ }
+ if debug {
+ os.Setenv("SWAGGER_DEBUG", "true")
+ }
+ transportCfg := apiClient.DefaultTransportConfig().
+ WithHost(baseURLParsed.Host).
+ WithBasePath(apiPath).
+ WithSchemes([]string{baseURLParsed.Scheme})
+ apiCli = apiClient.NewHTTPClientWithConfig(nil, transportCfg)
+ authToken = openapiRuntimeClient.BearerToken(token)
+}
+
func initConfig() {
var err error
cfg, err = config.LoadConfig()
@@ -70,7 +107,29 @@ func initConfig() {
if err != nil {
mgr = cfg.Managers[0]
}
- active = mgr.Name
}
- cli = client.NewClient(active, mgr, debug)
+ initAPIClient(mgr.BaseURL, mgr.Token)
+}
+
+func formatOneHookInfo(hook params.HookInfo) {
+ t := table.NewWriter()
+ header := table.Row{"Field", "Value"}
+ t.AppendHeader(header)
+ t.AppendRows([]table.Row{
+ {"ID", hook.ID},
+ {"URL", hook.URL},
+ {"Events", hook.Events},
+ {"Active", hook.Active},
+ {"Insecure SSL", hook.InsecureSSL},
+ })
+ fmt.Println(t.Render())
+}
+
+func printAsJSON(value interface{}) {
+ asJs, err := json.Marshal(value)
+ if err != nil {
+ fmt.Printf("Failed to marshal value to json: %s", err)
+ os.Exit(1)
+ }
+ fmt.Println(string(asJs))
}
diff --git a/cmd/garm-cli/cmd/runner.go b/cmd/garm-cli/cmd/runner.go
index 7255526e..44a7b8df 100644
--- a/cmd/garm-cli/cmd/runner.go
+++ b/cmd/garm-cli/cmd/runner.go
@@ -18,18 +18,25 @@ import (
"fmt"
"os"
- "github.com/cloudbase/garm/params"
-
"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
+
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientInstances "github.com/cloudbase/garm/client/instances"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
)
var (
- runnerRepository string
- runnerOrganization string
- runnerEnterprise string
- runnerAll bool
- forceRemove bool
+ runnerRepository string
+ runnerOrganization string
+ runnerEnterprise string
+ runnerAll bool
+ forceRemove bool
+ bypassGHUnauthorized bool
+ long bool
)
// runnerCmd represents the runner command
@@ -43,6 +50,10 @@ list all instances.`,
Run: nil,
}
+type instancesPayloadGetter interface {
+ GetPayload() params.Instances
+}
+
var runnerListCmd = &cobra.Command{
Use: "list",
Aliases: []string{"ls"},
@@ -76,7 +87,7 @@ Example:
return errNeedsInitError
}
- var instances []params.Instance
+ var response instancesPayloadGetter
var err error
switch len(args) {
@@ -88,19 +99,37 @@ Example:
return fmt.Errorf("specifying a pool ID and any of [all org repo enterprise] are mutually exclusive")
}
- instances, err = cli.ListPoolInstances(args[0])
+ listPoolInstancesReq := apiClientInstances.NewListPoolInstancesParams()
+ listPoolInstancesReq.PoolID = args[0]
+ response, err = apiCli.Instances.ListPoolInstances(listPoolInstancesReq, authToken)
case 0:
if cmd.Flags().Changed("repo") {
- instances, err = cli.ListRepoInstances(runnerRepository)
+ runnerRepo, resErr := resolveRepository(runnerRepository, endpointName)
+ if resErr != nil {
+ return resErr
+ }
+ listRepoInstancesReq := apiClientRepos.NewListRepoInstancesParams()
+ listRepoInstancesReq.RepoID = runnerRepo
+ response, err = apiCli.Repositories.ListRepoInstances(listRepoInstancesReq, authToken)
} else if cmd.Flags().Changed("org") {
- instances, err = cli.ListOrgInstances(runnerOrganization)
+ runnerOrg, resErr := resolveOrganization(runnerOrganization, endpointName)
+ if resErr != nil {
+ return resErr
+ }
+ listOrgInstancesReq := apiClientOrgs.NewListOrgInstancesParams()
+ listOrgInstancesReq.OrgID = runnerOrg
+ response, err = apiCli.Organizations.ListOrgInstances(listOrgInstancesReq, authToken)
} else if cmd.Flags().Changed("enterprise") {
- instances, err = cli.ListEnterpriseInstances(runnerEnterprise)
- } else if cmd.Flags().Changed("all") {
- instances, err = cli.ListAllInstances()
+ runnerEnt, resErr := resolveEnterprise(runnerEnterprise, endpointName)
+ if resErr != nil {
+ return resErr
+ }
+ listEnterpriseInstancesReq := apiClientEnterprises.NewListEnterpriseInstancesParams()
+ listEnterpriseInstancesReq.EnterpriseID = runnerEnt
+ response, err = apiCli.Enterprises.ListEnterpriseInstances(listEnterpriseInstancesReq, authToken)
} else {
- cmd.Help() //nolint
- os.Exit(0)
+ listInstancesReq := apiClientInstances.NewListInstancesParams()
+ response, err = apiCli.Instances.ListInstances(listInstancesReq, authToken)
}
default:
cmd.Help() //nolint
@@ -110,7 +139,9 @@ Example:
if err != nil {
return err
}
- formatInstances(instances)
+
+ instances := response.GetPayload()
+ formatInstances(instances, long)
return nil
},
}
@@ -120,7 +151,7 @@ var runnerShowCmd = &cobra.Command{
Short: "Show details for a runner",
Long: `Displays a detailed view of a single runner.`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -133,11 +164,13 @@ var runnerShowCmd = &cobra.Command{
return fmt.Errorf("too many arguments")
}
- instance, err := cli.GetInstanceByName(args[0])
+ showInstanceReq := apiClientInstances.NewGetInstanceParams()
+ showInstanceReq.InstanceName = args[0]
+ response, err := apiCli.Instances.GetInstance(showInstanceReq, authToken)
if err != nil {
return err
}
- formatSingleInstance(instance)
+ formatSingleInstance(response.Payload)
return nil
},
}
@@ -157,7 +190,7 @@ NOTE: An active runner cannot be removed from Github. You will have
to either cancel the workflow or wait for it to finish.
`,
SilenceUsage: true,
- RunE: func(cmd *cobra.Command, args []string) error {
+ RunE: func(_ *cobra.Command, args []string) error {
if needsInit {
return errNeedsInitError
}
@@ -166,11 +199,11 @@ to either cancel the workflow or wait for it to finish.
return fmt.Errorf("requires a runner name")
}
- if !forceRemove {
- return fmt.Errorf("use --force-remove-runner=true to remove a runner")
- }
-
- if err := cli.DeleteRunner(args[0]); err != nil {
+ deleteInstanceReq := apiClientInstances.NewDeleteInstanceParams()
+ deleteInstanceReq.InstanceName = args[0]
+ deleteInstanceReq.ForceRemove = &forceRemove
+ deleteInstanceReq.BypassGHUnauthorized = &bypassGHUnauthorized
+ if err := apiCli.Instances.DeleteInstance(deleteInstanceReq, authToken); err != nil {
return err
}
return nil
@@ -179,12 +212,17 @@ to either cancel the workflow or wait for it to finish.
func init() {
runnerListCmd.Flags().StringVarP(&runnerRepository, "repo", "r", "", "List all runners from all pools within this repository.")
- runnerListCmd.Flags().StringVarP(&runnerOrganization, "org", "o", "", "List all runners from all pools withing this organization.")
- runnerListCmd.Flags().StringVarP(&runnerEnterprise, "enterprise", "e", "", "List all runners from all pools withing this enterprise.")
- runnerListCmd.Flags().BoolVarP(&runnerAll, "all", "a", false, "List all runners, regardless of org or repo.")
+ runnerListCmd.Flags().StringVarP(&runnerOrganization, "org", "o", "", "List all runners from all pools within this organization.")
+ runnerListCmd.Flags().StringVarP(&runnerEnterprise, "enterprise", "e", "", "List all runners from all pools within this enterprise.")
+ runnerListCmd.Flags().BoolVarP(&runnerAll, "all", "a", true, "List all runners, regardless of org or repo. (deprecated)")
+ runnerListCmd.Flags().BoolVarP(&long, "long", "l", false, "Include additional info.")
runnerListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise", "all")
+ runnerListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
- runnerDeleteCmd.Flags().BoolVarP(&forceRemove, "force-remove-runner", "f", false, "Confirm you want to delete a runner")
+ runnerListCmd.Flags().MarkDeprecated("all", "all runners are listed by default in the absence of --repo, --org or --enterprise.")
+
+ runnerDeleteCmd.Flags().BoolVarP(&forceRemove, "force-remove-runner", "f", false, "Forcefully remove a runner. If set to true, GARM will ignore provider errors when removing the runner.")
+ runnerDeleteCmd.Flags().BoolVarP(&bypassGHUnauthorized, "bypass-github-unauthorized", "b", false, "Ignore Unauthorized errors from GitHub and proceed with removing runner from provider and DB. This is useful when credentials are no longer valid and you want to remove your runners. Warning, this has the potential to leave orphaned runners in GitHub. You will need to update your credentials to properly consolidate.")
runnerDeleteCmd.MarkFlagsMutuallyExclusive("force-remove-runner")
runnerCmd.AddCommand(
@@ -196,25 +234,46 @@ func init() {
rootCmd.AddCommand(runnerCmd)
}
-func formatInstances(param []params.Instance) {
+func formatInstances(param []params.Instance, detailed bool) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(param)
+ return
+ }
 t := table.NewWriter()
- header := table.Row{"Name", "Status", "Runner Status", "Pool ID"}
+ header := table.Row{"Nr", "Name", "Status", "Runner Status", "Pool ID", "Scale Set ID"}
+ if detailed {
+ header = append(header, "Created At", "Updated At", "Job Name", "Started At", "Run ID", "Repository")
+ }
 t.AppendHeader(header)
- for _, inst := range param {
- t.AppendRow(table.Row{inst.Name, inst.Status, inst.RunnerStatus, inst.PoolID})
+ for idx, inst := range param {
+ row := table.Row{idx + 1, inst.Name, inst.Status, inst.RunnerStatus, inst.PoolID, inst.ScaleSetID}
+ if detailed {
+ row = append(row, inst.CreatedAt, inst.UpdatedAt)
+ if inst.Job != nil {
+ repo := fmt.Sprintf("%s/%s", inst.Job.RepositoryOwner, inst.Job.RepositoryName)
+ row = append(row, inst.Job.Name, inst.Job.StartedAt, inst.Job.RunID, repo)
+ }
+ }
+ t.AppendRow(row)
 t.AppendSeparator()
 }
 fmt.Println(t.Render())
 }
func formatSingleInstance(instance params.Instance) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(instance)
+ return
+ }
t := table.NewWriter()
header := table.Row{"Field", "Value"}
t.AppendHeader(header)
t.AppendRow(table.Row{"ID", instance.ID}, table.RowConfig{AutoMerge: false})
+ t.AppendRow(table.Row{"Created At", instance.CreatedAt})
+ t.AppendRow(table.Row{"Updated At", instance.UpdatedAt})
t.AppendRow(table.Row{"Provider ID", instance.ProviderID}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Name", instance.Name}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"OS Type", instance.OSType}, table.RowConfig{AutoMerge: false})
@@ -223,7 +282,11 @@ func formatSingleInstance(instance params.Instance) {
t.AppendRow(table.Row{"OS Version", instance.OSVersion}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Status", instance.Status}, table.RowConfig{AutoMerge: false})
t.AppendRow(table.Row{"Runner Status", instance.RunnerStatus}, table.RowConfig{AutoMerge: false})
- t.AppendRow(table.Row{"Pool ID", instance.PoolID}, table.RowConfig{AutoMerge: false})
+ if instance.PoolID != "" {
+ t.AppendRow(table.Row{"Pool ID", instance.PoolID}, table.RowConfig{AutoMerge: false})
+ } else if instance.ScaleSetID != 0 {
+ t.AppendRow(table.Row{"Scale Set ID", instance.ScaleSetID}, table.RowConfig{AutoMerge: false})
+ }
if len(instance.Addresses) > 0 {
for _, addr := range instance.Addresses {
diff --git a/cmd/garm-cli/cmd/scalesets.go b/cmd/garm-cli/cmd/scalesets.go
new file mode 100644
index 00000000..a78fe33f
--- /dev/null
+++ b/cmd/garm-cli/cmd/scalesets.go
@@ -0,0 +1,539 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/spf13/cobra"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+ apiClientScaleSets "github.com/cloudbase/garm/client/scalesets"
+ "github.com/cloudbase/garm/cmd/garm-cli/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ scalesetProvider string
+ scalesetMaxRunners uint
+ scalesetMinIdleRunners uint
+ scalesetRunnerPrefix string
+ scalesetName string
+ scalesetImage string
+ scalesetFlavor string
+ scalesetOSType string
+ scalesetOSArch string
+ scalesetEnabled bool
+ scalesetRunnerBootstrapTimeout uint
+ scalesetRepository string
+ scalesetOrganization string
+ scalesetEnterprise string
+ scalesetExtraSpecsFile string
+ scalesetExtraSpecs string
+ scalesetGitHubRunnerGroup string
+)
+
+type scalesetPayloadGetter interface {
+ GetPayload() params.ScaleSet
+}
+
+type scalesetsPayloadGetter interface {
+ GetPayload() params.ScaleSets
+}
+
+// scalesetCmd represents the scale set command
+var scalesetCmd = &cobra.Command{
+ Use: "scaleset",
+ SilenceUsage: true,
+ Short: "List scale sets",
+ Long: `Query information or perform operations on scale sets.`,
+ Run: nil,
+}
+
+var scalesetListCmd = &cobra.Command{
+ Use: "list",
+ Aliases: []string{"ls"},
+ Short: "List scale sets",
+ Long: `List scale sets of repositories, orgs or all of the above.
+
+This command will list scale sets from one repo, one org or all scale sets
+on the system. The list flags are mutually exclusive. You must however
+specify one of them.
+
+Example:
+
+ List scalesets from one repo:
+ garm-cli scaleset list --repo=05e7eac6-4705-486d-89c9-0170bbb576af
+
+ List scalesets from one org:
+ garm-cli scaleset list --org=5493e51f-3170-4ce3-9f05-3fe690fc6ec6
+
+ List scalesets from one enterprise:
+ garm-cli scaleset list --enterprise=a8ee4c66-e762-4cbe-a35d-175dba2c9e62
+
+ List all scalesets from all repos, orgs and enterprises:
+ garm-cli scaleset list --all
+
+`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ var response scalesetsPayloadGetter
+ var err error
+
+ switch len(args) {
+ case 0:
+ if cmd.Flags().Changed("repo") {
+ scalesetRepository, err = resolveRepository(scalesetRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ listRepoScaleSetsReq := apiClientRepos.NewListRepoScaleSetsParams()
+ listRepoScaleSetsReq.RepoID = scalesetRepository
+ response, err = apiCli.Repositories.ListRepoScaleSets(listRepoScaleSetsReq, authToken)
+ } else if cmd.Flags().Changed("org") {
+ scalesetOrganization, err = resolveOrganization(scalesetOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ listOrgScaleSetsReq := apiClientOrgs.NewListOrgScaleSetsParams()
+ listOrgScaleSetsReq.OrgID = scalesetOrganization
+ response, err = apiCli.Organizations.ListOrgScaleSets(listOrgScaleSetsReq, authToken)
+ } else if cmd.Flags().Changed("enterprise") {
+ scalesetEnterprise, err = resolveEnterprise(scalesetEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ listEnterpriseScaleSetsReq := apiClientEnterprises.NewListEnterpriseScaleSetsParams()
+ listEnterpriseScaleSetsReq.EnterpriseID = scalesetEnterprise
+ response, err = apiCli.Enterprises.ListEnterpriseScaleSets(listEnterpriseScaleSetsReq, authToken)
+ } else {
+ listScaleSetsReq := apiClientScaleSets.NewListScalesetsParams()
+ response, err = apiCli.Scalesets.ListScalesets(listScaleSetsReq, authToken)
+ }
+ default:
+ cmd.Help() //nolint
+ os.Exit(0)
+ }
+
+ if err != nil {
+ return err
+ }
+ formatScaleSets(response.GetPayload())
+ return nil
+ },
+}
+
+var scaleSetShowCmd = &cobra.Command{
+ Use: "show",
+ Short: "Show details for a scale set",
+ Long: `Displays a detailed view of a single scale set.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ getScaleSetReq := apiClientScaleSets.NewGetScaleSetParams()
+ getScaleSetReq.ScalesetID = args[0]
+ response, err := apiCli.Scalesets.GetScaleSet(getScaleSetReq, authToken)
+ if err != nil {
+ return err
+ }
+ formatOneScaleSet(response.Payload)
+ return nil
+ },
+}
+
+var scaleSetDeleteCmd = &cobra.Command{
+ Use: "delete",
+ Aliases: []string{"remove", "rm", "del"},
+ Short: "Delete scale set by ID",
+ Long: `Delete one scale set by referencing its ID, regardless of repo or org.`,
+ SilenceUsage: true,
+ RunE: func(_ *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ deleteScaleSetReq := apiClientScaleSets.NewDeleteScaleSetParams()
+ deleteScaleSetReq.ScalesetID = args[0]
+ if err := apiCli.Scalesets.DeleteScaleSet(deleteScaleSetReq, authToken); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var scaleSetAddCmd = &cobra.Command{
+ Use: "add",
+ Aliases: []string{"create"},
+ Short: "Add scale set",
+ Long: `Add a new scale set.`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ newScaleSetParams := params.CreateScaleSetParams{
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: scalesetRunnerPrefix,
+ },
+ ProviderName: scalesetProvider,
+ Name: scalesetName,
+ MaxRunners: scalesetMaxRunners,
+ MinIdleRunners: scalesetMinIdleRunners,
+ Image: scalesetImage,
+ Flavor: scalesetFlavor,
+ OSType: commonParams.OSType(scalesetOSType),
+ OSArch: commonParams.OSArch(scalesetOSArch),
+ Enabled: scalesetEnabled,
+ RunnerBootstrapTimeout: scalesetRunnerBootstrapTimeout,
+ GitHubRunnerGroup: scalesetGitHubRunnerGroup,
+ }
+
+ if cmd.Flags().Changed("extra-specs") {
+ data, err := asRawMessage([]byte(scalesetExtraSpecs))
+ if err != nil {
+ return err
+ }
+ newScaleSetParams.ExtraSpecs = data
+ }
+
+ if scalesetExtraSpecsFile != "" {
+ data, err := extraSpecsFromFile(scalesetExtraSpecsFile)
+ if err != nil {
+ return err
+ }
+ newScaleSetParams.ExtraSpecs = data
+ }
+
+ if err := newScaleSetParams.Validate(); err != nil {
+ return err
+ }
+
+ var err error
+ var response scalesetPayloadGetter
+ if cmd.Flags().Changed("repo") {
+ scalesetRepository, err = resolveRepository(scalesetRepository, endpointName)
+ if err != nil {
+ return err
+ }
+ newRepoScaleSetReq := apiClientRepos.NewCreateRepoScaleSetParams()
+ newRepoScaleSetReq.RepoID = scalesetRepository
+ newRepoScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Repositories.CreateRepoScaleSet(newRepoScaleSetReq, authToken)
+ } else if cmd.Flags().Changed("org") {
+ scalesetOrganization, err = resolveOrganization(scalesetOrganization, endpointName)
+ if err != nil {
+ return err
+ }
+ newOrgScaleSetReq := apiClientOrgs.NewCreateOrgScaleSetParams()
+ newOrgScaleSetReq.OrgID = scalesetOrganization
+ newOrgScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Organizations.CreateOrgScaleSet(newOrgScaleSetReq, authToken)
+ } else if cmd.Flags().Changed("enterprise") {
+ scalesetEnterprise, err = resolveEnterprise(scalesetEnterprise, endpointName)
+ if err != nil {
+ return err
+ }
+ newEnterpriseScaleSetReq := apiClientEnterprises.NewCreateEnterpriseScaleSetParams()
+ newEnterpriseScaleSetReq.EnterpriseID = scalesetEnterprise
+ newEnterpriseScaleSetReq.Body = newScaleSetParams
+ response, err = apiCli.Enterprises.CreateEnterpriseScaleSet(newEnterpriseScaleSetReq, authToken)
+ } else {
+ cmd.Help() //nolint
+ os.Exit(0)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ formatOneScaleSet(response.GetPayload())
+ return nil
+ },
+}
+
+var scaleSetUpdateCmd = &cobra.Command{
+ Use: "update",
+ Short: "Update one scale set",
+ Long: `Updates scale set characteristics.
+
+This command updates the scale set characteristics. Runners already created prior to updating
+the scale set, will not be recreated. If they no longer suit your needs, you will need to
+explicitly remove them using the runner delete command.
+ `,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if needsInit {
+ return errNeedsInitError
+ }
+
+ if len(args) == 0 {
+ return fmt.Errorf("command requires a scale set ID")
+ }
+
+ if len(args) > 1 {
+ return fmt.Errorf("too many arguments")
+ }
+
+ updateScaleSetReq := apiClientScaleSets.NewUpdateScaleSetParams()
+ scaleSetUpdateParams := params.UpdateScaleSetParams{}
+
+ if cmd.Flags().Changed("image") {
+ scaleSetUpdateParams.Image = scalesetImage
+ }
+
+ if cmd.Flags().Changed("name") {
+ scaleSetUpdateParams.Name = scalesetName
+ }
+
+ if cmd.Flags().Changed("flavor") {
+ scaleSetUpdateParams.Flavor = scalesetFlavor
+ }
+
+ if cmd.Flags().Changed("os-type") {
+ scaleSetUpdateParams.OSType = commonParams.OSType(scalesetOSType)
+ }
+
+ if cmd.Flags().Changed("os-arch") {
+ scaleSetUpdateParams.OSArch = commonParams.OSArch(scalesetOSArch)
+ }
+
+ if cmd.Flags().Changed("max-runners") {
+ scaleSetUpdateParams.MaxRunners = &scalesetMaxRunners
+ }
+
+ if cmd.Flags().Changed("min-idle-runners") {
+ scaleSetUpdateParams.MinIdleRunners = &scalesetMinIdleRunners
+ }
+
+ if cmd.Flags().Changed("runner-prefix") {
+ scaleSetUpdateParams.RunnerPrefix = params.RunnerPrefix{
+ Prefix: scalesetRunnerPrefix,
+ }
+ }
+
+ if cmd.Flags().Changed("runner-group") {
+ scaleSetUpdateParams.GitHubRunnerGroup = &scalesetGitHubRunnerGroup
+ }
+
+ if cmd.Flags().Changed("enabled") {
+ scaleSetUpdateParams.Enabled = &scalesetEnabled
+ }
+
+ if cmd.Flags().Changed("runner-bootstrap-timeout") {
+ scaleSetUpdateParams.RunnerBootstrapTimeout = &scalesetRunnerBootstrapTimeout
+ }
+
+ if cmd.Flags().Changed("extra-specs") {
+ data, err := asRawMessage([]byte(scalesetExtraSpecs))
+ if err != nil {
+ return err
+ }
+ scaleSetUpdateParams.ExtraSpecs = data
+ }
+
+ if scalesetExtraSpecsFile != "" {
+ data, err := extraSpecsFromFile(scalesetExtraSpecsFile)
+ if err != nil {
+ return err
+ }
+ scaleSetUpdateParams.ExtraSpecs = data
+ }
+
+ updateScaleSetReq.ScalesetID = args[0]
+ updateScaleSetReq.Body = scaleSetUpdateParams
+ response, err := apiCli.Scalesets.UpdateScaleSet(updateScaleSetReq, authToken)
+ if err != nil {
+ return err
+ }
+
+ formatOneScaleSet(response.Payload)
+ return nil
+ },
+}
+
+func init() {
+ scalesetListCmd.Flags().StringVarP(&scalesetRepository, "repo", "r", "", "List all scale sets within this repository.")
+ scalesetListCmd.Flags().StringVarP(&scalesetOrganization, "org", "o", "", "List all scale sets within this organization.")
+ scalesetListCmd.Flags().StringVarP(&scalesetEnterprise, "enterprise", "e", "", "List all scale sets within this enterprise.")
+ scalesetListCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
+ scalesetListCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetImage, "image", "", "The provider-specific image name to use for runners in this scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetFlavor, "flavor", "", "The flavor to use for the runners in this scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetName, "name", "", "The name of the scale set. This option is mandatory.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetOSArch, "os-arch", "amd64", "Operating system architecture (amd64, arm, etc).")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this scale set.")
+ scaleSetUpdateCmd.Flags().UintVar(&scalesetMaxRunners, "max-runners", 5, "The maximum number of runner this scale set will create.")
+ scaleSetUpdateCmd.Flags().UintVar(&scalesetMinIdleRunners, "min-idle-runners", 1, "Attempt to maintain a minimum of idle self-hosted runners of this type.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetGitHubRunnerGroup, "runner-group", "", "The GitHub runner group in which all runners of this scale set will be added.")
+ scaleSetUpdateCmd.Flags().BoolVar(&scalesetEnabled, "enabled", false, "Enable this scale set.")
+ scaleSetUpdateCmd.Flags().UintVar(&scalesetRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join Github.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetExtraSpecsFile, "extra-specs-file", "", "A file containing a valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetUpdateCmd.Flags().StringVar(&scalesetExtraSpecs, "extra-specs", "", "A valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetUpdateCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
+
+ scaleSetAddCmd.Flags().StringVar(&scalesetProvider, "provider-name", "", "The name of the provider where runners will be created.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetImage, "image", "", "The provider-specific image name to use for runners in this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetName, "name", "", "The name of the scale set. This option is mandatory.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetFlavor, "flavor", "", "The flavor to use for this runner.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetRunnerPrefix, "runner-prefix", "", "The name prefix to use for runners in this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetOSType, "os-type", "linux", "Operating system type (windows, linux, etc).")
+ scaleSetAddCmd.Flags().StringVar(&scalesetOSArch, "os-arch", "amd64", "Operating system architecture (amd64, arm, etc).")
+ scaleSetAddCmd.Flags().StringVar(&scalesetExtraSpecsFile, "extra-specs-file", "", "A file containing a valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetExtraSpecs, "extra-specs", "", "A valid json which will be passed to the IaaS provider managing the scale set.")
+ scaleSetAddCmd.Flags().StringVar(&scalesetGitHubRunnerGroup, "runner-group", "", "The GitHub runner group in which all runners of this scale set will be added.")
+ scaleSetAddCmd.Flags().UintVar(&scalesetMaxRunners, "max-runners", 5, "The maximum number of runner this scale set will create.")
+ scaleSetAddCmd.Flags().UintVar(&scalesetRunnerBootstrapTimeout, "runner-bootstrap-timeout", 20, "Duration in minutes after which a runner is considered failed if it does not join Github.")
+ scaleSetAddCmd.Flags().UintVar(&scalesetMinIdleRunners, "min-idle-runners", 1, "Attempt to maintain a minimum of idle self-hosted runners of this type.")
+ scaleSetAddCmd.Flags().BoolVar(&scalesetEnabled, "enabled", false, "Enable this scale set.")
+ scaleSetAddCmd.Flags().StringVar(&endpointName, "endpoint", "", "When using the name of an entity, the endpoint must be specified when multiple entities with the same name exist.")
+ scaleSetAddCmd.MarkFlagRequired("provider-name") //nolint
+ scaleSetAddCmd.MarkFlagRequired("name") //nolint
+ scaleSetAddCmd.MarkFlagRequired("image") //nolint
+ scaleSetAddCmd.MarkFlagRequired("flavor") //nolint
+
+ scaleSetAddCmd.Flags().StringVarP(&scalesetRepository, "repo", "r", "", "Add the new scale set within this repository.")
+ scaleSetAddCmd.Flags().StringVarP(&scalesetOrganization, "org", "o", "", "Add the new scale set within this organization.")
+ scaleSetAddCmd.Flags().StringVarP(&scalesetEnterprise, "enterprise", "e", "", "Add the new scale set within this enterprise.")
+ scaleSetAddCmd.MarkFlagsMutuallyExclusive("repo", "org", "enterprise")
+ scaleSetAddCmd.MarkFlagsMutuallyExclusive("extra-specs-file", "extra-specs")
+
+ scalesetCmd.AddCommand(
+ scalesetListCmd,
+ scaleSetShowCmd,
+ scaleSetDeleteCmd,
+ scaleSetUpdateCmd,
+ scaleSetAddCmd,
+ )
+
+ rootCmd.AddCommand(scalesetCmd)
+}
+
+func formatScaleSets(scaleSets []params.ScaleSet) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(scaleSets)
+ return
+ }
+ t := table.NewWriter()
+ header := table.Row{"ID", "Scale Set Name", "Image", "Flavor", "Belongs to", "Level", "Runner Group", "Enabled", "Runner Prefix", "Provider"}
+ t.AppendHeader(header)
+
+ for _, scaleSet := range scaleSets {
+ var belongsTo string
+ var level string
+
+ switch {
+ case scaleSet.RepoID != "" && scaleSet.RepoName != "":
+ belongsTo = scaleSet.RepoName
+ level = entityTypeRepo
+ case scaleSet.OrgID != "" && scaleSet.OrgName != "":
+ belongsTo = scaleSet.OrgName
+ level = entityTypeOrg
+ case scaleSet.EnterpriseID != "" && scaleSet.EnterpriseName != "":
+ belongsTo = scaleSet.EnterpriseName
+ level = entityTypeEnterprise
+ }
+ t.AppendRow(table.Row{scaleSet.ID, scaleSet.Name, scaleSet.Image, scaleSet.Flavor, belongsTo, level, scaleSet.GitHubRunnerGroup, scaleSet.Enabled, scaleSet.GetRunnerPrefix(), scaleSet.ProviderName})
+ t.AppendSeparator()
+ }
+ fmt.Println(t.Render())
+}
+
+func formatOneScaleSet(scaleSet params.ScaleSet) {
+ if outputFormat == common.OutputFormatJSON {
+ printAsJSON(scaleSet)
+ return
+ }
+ t := table.NewWriter()
+ rowConfigAutoMerge := table.RowConfig{AutoMerge: true}
+
+ header := table.Row{"Field", "Value"}
+
+ var belongsTo string
+ var level string
+
+ switch {
+ case scaleSet.RepoID != "" && scaleSet.RepoName != "":
+ belongsTo = scaleSet.RepoName
+ level = entityTypeRepo
+ case scaleSet.OrgID != "" && scaleSet.OrgName != "":
+ belongsTo = scaleSet.OrgName
+ level = entityTypeOrg
+ case scaleSet.EnterpriseID != "" && scaleSet.EnterpriseName != "":
+ belongsTo = scaleSet.EnterpriseName
+ level = entityTypeEnterprise
+ }
+
+ t.AppendHeader(header)
+ t.AppendRow(table.Row{"ID", scaleSet.ID})
+ t.AppendRow(table.Row{"Scale Set ID", scaleSet.ScaleSetID})
+ t.AppendRow(table.Row{"Scale Name", scaleSet.Name})
+ t.AppendRow(table.Row{"Provider Name", scaleSet.ProviderName})
+ t.AppendRow(table.Row{"Image", scaleSet.Image})
+ t.AppendRow(table.Row{"Flavor", scaleSet.Flavor})
+ t.AppendRow(table.Row{"OS Type", scaleSet.OSType})
+ t.AppendRow(table.Row{"OS Architecture", scaleSet.OSArch})
+ t.AppendRow(table.Row{"Max Runners", scaleSet.MaxRunners})
+ t.AppendRow(table.Row{"Min Idle Runners", scaleSet.MinIdleRunners})
+ t.AppendRow(table.Row{"Runner Bootstrap Timeout", scaleSet.RunnerBootstrapTimeout})
+ t.AppendRow(table.Row{"Belongs to", belongsTo})
+ t.AppendRow(table.Row{"Level", level})
+ t.AppendRow(table.Row{"Enabled", scaleSet.Enabled})
+ t.AppendRow(table.Row{"Runner Prefix", scaleSet.GetRunnerPrefix()})
+ t.AppendRow(table.Row{"Extra specs", string(scaleSet.ExtraSpecs)})
+ t.AppendRow(table.Row{"GitHub Runner Group", scaleSet.GitHubRunnerGroup})
+
+ if len(scaleSet.Instances) > 0 {
+ for _, instance := range scaleSet.Instances {
+ t.AppendRow(table.Row{"Instances", fmt.Sprintf("%s (%s)", instance.Name, instance.ID)}, rowConfigAutoMerge)
+ }
+ }
+
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, AutoMerge: true},
+ {Number: 2, AutoMerge: false, WidthMax: 100},
+ })
+ fmt.Println(t.Render())
+}
diff --git a/cmd/garm-cli/cmd/util.go b/cmd/garm-cli/cmd/util.go
new file mode 100644
index 00000000..26f57abb
--- /dev/null
+++ b/cmd/garm-cli/cmd/util.go
@@ -0,0 +1,108 @@
+package cmd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/uuid"
+
+ apiClientEnterprises "github.com/cloudbase/garm/client/enterprises"
+ apiClientOrgs "github.com/cloudbase/garm/client/organizations"
+ apiClientRepos "github.com/cloudbase/garm/client/repositories"
+)
+
+func resolveRepository(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing repository name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ parts := strings.SplitN(nameOrID, "/", 2)
+ if len(parts) < 2 {
+ // format of friendly name is invalid for a repository.
+ // Return the string as is.
+ return nameOrID, nil
+ }
+
+ listReposReq := apiClientRepos.NewListReposParams()
+ listReposReq.Owner = &parts[0]
+ listReposReq.Name = &parts[1]
+ if endpoint != "" {
+ listReposReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Repositories.ListRepos(listReposReq, authToken)
+ if err != nil {
+ return "", err
+ }
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("repository %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple repositories with the name %s exist, please use the repository ID or specify the --endpoint parameter", nameOrID)
+ }
+ return response.Payload[0].ID, nil
+}
+
+func resolveOrganization(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing organization name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ listOrgsReq := apiClientOrgs.NewListOrgsParams()
+ listOrgsReq.Name = &nameOrID
+ if endpoint != "" {
+ listOrgsReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Organizations.ListOrgs(listOrgsReq, authToken)
+ if err != nil {
+ return "", err
+ }
+
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("organization %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple organizations with the name %s exist, please use the organization ID or specify the --endpoint parameter", nameOrID)
+ }
+
+ return response.Payload[0].ID, nil
+}
+
+func resolveEnterprise(nameOrID, endpoint string) (string, error) {
+ if nameOrID == "" {
+ return "", fmt.Errorf("missing enterprise name or ID")
+ }
+ entityID, err := uuid.Parse(nameOrID)
+ if err == nil {
+ return entityID.String(), nil
+ }
+
+ listEnterprisesReq := apiClientEnterprises.NewListEnterprisesParams()
+ listEnterprisesReq.Name = &nameOrID
+ if endpoint != "" {
+ listEnterprisesReq.Endpoint = &endpoint
+ }
+ response, err := apiCli.Enterprises.ListEnterprises(listEnterprisesReq, authToken)
+ if err != nil {
+ return "", err
+ }
+
+ if len(response.Payload) == 0 {
+ return "", fmt.Errorf("enterprise %s was not found", nameOrID)
+ }
+
+ if len(response.Payload) > 1 {
+ return "", fmt.Errorf("multiple enterprises with the name %s exist, please use the enterprise ID or specify the --endpoint parameter", nameOrID)
+ }
+
+ return response.Payload[0].ID, nil
+}
diff --git a/cmd/garm-cli/cmd/version.go b/cmd/garm-cli/cmd/version.go
index 99253aed..ce51142f 100644
--- a/cmd/garm-cli/cmd/version.go
+++ b/cmd/garm-cli/cmd/version.go
@@ -18,6 +18,9 @@ import (
"fmt"
"github.com/spf13/cobra"
+
+ apiClientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ "github.com/cloudbase/garm/util/appdefaults"
)
// runnerCmd represents the runner command
@@ -25,8 +28,19 @@ var versionCmd = &cobra.Command{
Use: "version",
SilenceUsage: true,
Short: "Print version and exit",
- Run: func(cmd *cobra.Command, args []string) {
- fmt.Println(Version)
+ Run: func(_ *cobra.Command, _ []string) {
+ serverVersion := "v0.0.0-unknown"
+
+ if !needsInit {
+ showInfo := apiClientControllerInfo.NewControllerInfoParams()
+ response, err := apiCli.ControllerInfo.ControllerInfo(showInfo, authToken)
+ if err == nil {
+ serverVersion = response.Payload.Version
+ }
+ }
+
+ fmt.Printf("garm-cli: %s\n", appdefaults.GetVersion())
+ fmt.Printf("garm server: %s\n", serverVersion)
},
}
diff --git a/cmd/garm-cli/common/cobra.go b/cmd/garm-cli/common/cobra.go
new file mode 100644
index 00000000..399a4b92
--- /dev/null
+++ b/cmd/garm-cli/common/cobra.go
@@ -0,0 +1,44 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package common
+
+import "fmt"
+
+type OutputFormat string
+
+const (
+ OutputFormatTable OutputFormat = "table"
+ OutputFormatJSON OutputFormat = "json"
+)
+
+func (o *OutputFormat) String() string {
+ if o == nil {
+ return ""
+ }
+ return string(*o)
+}
+
+func (o *OutputFormat) Set(value string) error {
+ switch value {
+ case "table", "json":
+ *o = OutputFormat(value)
+ default:
+ return fmt.Errorf("allowed formats are: json, table")
+ }
+ return nil
+}
+
+func (o *OutputFormat) Type() string {
+ return "string"
+}
diff --git a/cmd/garm-cli/common/common.go b/cmd/garm-cli/common/common.go
index 8164b9a7..1f607cb4 100644
--- a/cmd/garm-cli/common/common.go
+++ b/cmd/garm-cli/common/common.go
@@ -15,13 +15,22 @@
package common
import (
+ "encoding/json"
"errors"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
"github.com/manifoldco/promptui"
"github.com/nbutton23/zxcvbn-go"
+
+ "github.com/cloudbase/garm-provider-common/util"
)
-func PromptPassword(label string) (string, error) {
+func PromptPassword(label string, compareTo string) (string, error) {
if label == "" {
label = "Password"
}
@@ -30,6 +39,9 @@ func PromptPassword(label string) (string, error) {
if passwordStenght.Score < 4 {
return errors.New("password is too weak")
}
+ if compareTo != "" && compareTo != input {
+ return errors.New("passwords do not match")
+ }
return nil
}
@@ -39,14 +51,13 @@ func PromptPassword(label string) (string, error) {
Mask: '*',
}
result, err := prompt.Run()
-
if err != nil {
return "", err
}
return result, nil
}
-func PromptString(label string) (string, error) {
+func PromptString(label string, a ...interface{}) (string, error) {
validate := func(input string) error {
if len(input) == 0 {
return errors.New("empty input not allowed")
@@ -55,13 +66,260 @@ func PromptString(label string) (string, error) {
}
prompt := promptui.Prompt{
- Label: label,
+ Label: fmt.Sprintf(label, a...),
Validate: validate,
}
result, err := prompt.Run()
-
if err != nil {
return "", err
}
return result, nil
}
+
+func PrintWebsocketMessage(_ int, msg []byte) error {
+ fmt.Println(util.SanitizeLogEntry(string(msg)))
+ return nil
+}
+
+type LogFormatter struct {
+ MinLevel string
+ AttributeFilters map[string]string
+ EnableColor bool
+}
+
+type LogRecord struct {
+ Time string `json:"time"`
+ Level string `json:"level"`
+ Msg string `json:"msg"`
+ Attrs map[string]interface{} `json:"-"` // populated manually below; stdlib encoding/json has no ",inline" option
+}
+
+// Color codes for different log levels
+const (
+ ColorReset = "\033[0m"
+ ColorRed = "\033[31m"
+ ColorYellow = "\033[33m"
+ ColorBlue = "\033[34m"
+ ColorMagenta = "\033[35m"
+ ColorCyan = "\033[36m"
+ ColorWhite = "\033[37m"
+ ColorGray = "\033[90m"
+)
+
+func (lf *LogFormatter) colorizeLevel(level string) string {
+ if !lf.EnableColor {
+ return level
+ }
+
+ levelUpper := strings.TrimSpace(strings.ToUpper(level))
+ switch levelUpper {
+ case "ERROR":
+ return ColorRed + level + ColorReset
+ case "WARN", "WARNING":
+ return ColorYellow + level + ColorReset
+ case "INFO":
+ return ColorBlue + level + ColorReset
+ case "DEBUG":
+ return ColorMagenta + level + ColorReset
+ default:
+ return level
+ }
+}
+
+func (lf *LogFormatter) shouldFilterLevel(level string) bool {
+ if lf.MinLevel == "" {
+ return false
+ }
+
+ levelMap := map[string]int{
+ "DEBUG": 0,
+ "INFO": 1,
+ "WARN": 2,
+ "ERROR": 3,
+ }
+
+ minLevelNum, exists := levelMap[strings.ToUpper(lf.MinLevel)]
+ if !exists {
+ return false
+ }
+
+ currentLevelNum, exists := levelMap[strings.ToUpper(level)]
+ if !exists {
+ return false
+ }
+
+ return currentLevelNum < minLevelNum
+}
+
+func (lf *LogFormatter) matchesAttributeFilters(attrs map[string]interface{}, msg string) bool {
+ if len(lf.AttributeFilters) == 0 {
+ return true
+ }
+
+ for key, expectedValue := range lf.AttributeFilters {
+ // Special handling for message filtering
+ if key == "msg" {
+ if strings.Contains(msg, expectedValue) {
+ return true
+ }
+ }
+
+ // Regular attribute filtering
+ actualValue, exists := attrs[key]
+ if exists {
+ actualStr := fmt.Sprintf("%v", actualValue)
+ if actualStr == expectedValue {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func (lf *LogFormatter) FormatWebsocketMessage(_ int, msg []byte) error {
+ // Try to parse as JSON log record
+ var logRecord LogRecord
+ err := json.Unmarshal(msg, &logRecord)
+ if err != nil {
+ // If it's not JSON, print as-is (sanitized)
+ _, err = fmt.Println(util.SanitizeLogEntry(string(msg)))
+ return err
+ }
+
+ // Apply level filtering
+ if lf.shouldFilterLevel(logRecord.Level) {
+ return nil
+ }
+
+ // Parse additional attributes from the JSON
+ var fullRecord map[string]interface{}
+ if err := json.Unmarshal(msg, &fullRecord); err == nil {
+ // Remove standard fields and keep only attributes
+ delete(fullRecord, "time")
+ delete(fullRecord, "level")
+ delete(fullRecord, "msg")
+ logRecord.Attrs = fullRecord
+ }
+
+ // Apply attribute filtering
+ if !lf.matchesAttributeFilters(logRecord.Attrs, logRecord.Msg) {
+ return nil
+ }
+
+ // Format timestamp to fixed width
+ timeStr := logRecord.Time
+ if t, err := time.Parse(time.RFC3339Nano, logRecord.Time); err == nil {
+ timeStr = t.Format("2006-01-02 15:04:05.000")
+ }
+
+ // Format log level to fixed width (5 characters)
+ levelStr := lf.colorizeLevel(fmt.Sprintf("%-5s", strings.ToUpper(logRecord.Level)))
+
+ // Highlight message if it matches a msg filter
+ msgStr := logRecord.Msg
+ if msgFilter, hasMsgFilter := lf.AttributeFilters["msg"]; hasMsgFilter {
+ if strings.Contains(msgStr, msgFilter) && lf.EnableColor {
+ msgStr = ColorYellow + msgStr + ColorReset
+ }
+ }
+
+ output := fmt.Sprintf("%s [%s] %s", timeStr, levelStr, msgStr)
+
+ // Add attributes if any
+ if len(logRecord.Attrs) > 0 {
+ // Get sorted keys for consistent output
+ var keys []string
+ for k := range logRecord.Attrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ var attrPairs []string
+ for _, k := range keys {
+ v := logRecord.Attrs[k]
+ attrStr := fmt.Sprintf("%s=%v", k, v)
+
+ // Highlight filtered attributes
+ if filterValue, isFiltered := lf.AttributeFilters[k]; isFiltered && fmt.Sprintf("%v", v) == filterValue {
+ if lf.EnableColor {
+ attrStr = ColorYellow + attrStr + ColorGray
+ }
+ } else if lf.EnableColor {
+ attrStr = ColorGray + attrStr
+ }
+
+ attrPairs = append(attrPairs, attrStr)
+ }
+ if len(attrPairs) > 0 {
+ if lf.EnableColor {
+ output += " " + strings.Join(attrPairs, " ") + ColorReset
+ } else {
+ output += " " + strings.Join(attrPairs, " ")
+ }
+ }
+ }
+
+ fmt.Println(output)
+ return nil
+}
+
+// supportsColor checks if the current terminal/environment supports ANSI colors.
+// This is best effort. There is no reliable way to determine if a terminal supports
+// color. Set NO_COLOR=1 to disable color if your terminal doesn't support it, but this
+// function returns true.
+func supportsColor() bool {
+ // Check NO_COLOR environment variable (universal standard)
+ if os.Getenv("NO_COLOR") != "" {
+ return false
+ }
+
+ // Check FORCE_COLOR environment variable
+ if os.Getenv("FORCE_COLOR") != "" {
+ return true
+ }
+
+ // On Windows, check for modern terminal support
+ if runtime.GOOS == "windows" {
+ // Check for Windows Terminal
+ if os.Getenv("WT_SESSION") != "" {
+ return true
+ }
+ // Check for ConEmu
+ if os.Getenv("ConEmuANSI") == "ON" {
+ return true
+ }
+ // Check for other modern terminals
+ term := os.Getenv("TERM")
+ if strings.Contains(term, "color") || term == "xterm-256color" || term == "screen-256color" {
+ return true
+ }
+ // Modern PowerShell and cmd.exe with VT processing
+ if os.Getenv("TERM_PROGRAM") != "" {
+ return true
+ }
+ // Default to false for older Windows cmd.exe
+ return false
+ }
+
+ // On Unix-like systems, check TERM
+ term := os.Getenv("TERM")
+ if term == "" || term == "dumb" {
+ return false
+ }
+
+ return true
+}
+
+func NewLogFormatter(minLevel string, attributeFilters map[string]string, color bool) *LogFormatter {
+ var enableColor bool
+ if color && supportsColor() {
+ enableColor = true
+ }
+
+ return &LogFormatter{
+ MinLevel: minLevel,
+ AttributeFilters: attributeFilters,
+ EnableColor: enableColor,
+ }
+}
diff --git a/cmd/garm-cli/config/config.go b/cmd/garm-cli/config/config.go
index 133b38da..cf1cf1d2 100644
--- a/cmd/garm-cli/config/config.go
+++ b/cmd/garm-cli/config/config.go
@@ -15,15 +15,15 @@
package config
import (
+ "errors"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/BurntSushi/toml"
- "github.com/pkg/errors"
- runnerErrors "github.com/cloudbase/garm/errors"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
)
const (
@@ -34,11 +34,11 @@ const (
func getConfigFilePath() (string, error) {
configDir, err := getHomeDir()
if err != nil {
- return "", errors.Wrap(err, "fetching home folder")
+ return "", fmt.Errorf("error fetching home folder: %w", err)
}
if err := ensureHomeDir(configDir); err != nil {
- return "", errors.Wrap(err, "ensuring config dir")
+ return "", fmt.Errorf("error ensuring config dir: %w", err)
}
cfgFile := filepath.Join(configDir, DefaultConfigFileName)
@@ -48,7 +48,7 @@ func getConfigFilePath() (string, error) {
func LoadConfig() (*Config, error) {
cfgFile, err := getConfigFilePath()
if err != nil {
- return nil, errors.Wrap(err, "fetching config")
+ return nil, fmt.Errorf("error fetching config: %w", err)
}
if _, err := os.Stat(cfgFile); err != nil {
@@ -56,12 +56,12 @@ func LoadConfig() (*Config, error) {
// return empty config
return &Config{}, nil
}
- return nil, errors.Wrap(err, "accessing config file")
+ return nil, fmt.Errorf("error accessing config file: %w", err)
}
var config Config
if _, err := toml.DecodeFile(cfgFile, &config); err != nil {
- return nil, errors.Wrap(err, "decoding toml")
+ return nil, fmt.Errorf("error decoding toml: %w", err)
}
return &config, nil
@@ -157,17 +157,17 @@ func (c *Config) SaveConfig() error {
cfgFile, err := getConfigFilePath()
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
- return errors.Wrap(err, "getting config")
+ return fmt.Errorf("error getting config: %w", err)
}
}
cfgHandle, err := os.Create(cfgFile)
if err != nil {
- return errors.Wrap(err, "getting file handle")
+ return fmt.Errorf("error getting file handle: %w", err)
}
encoder := toml.NewEncoder(cfgHandle)
if err := encoder.Encode(c); err != nil {
- return errors.Wrap(err, "saving config")
+ return fmt.Errorf("error saving config: %w", err)
}
return nil
diff --git a/cmd/garm-cli/config/home.go b/cmd/garm-cli/config/home.go
index b6043289..11821e9c 100644
--- a/cmd/garm-cli/config/home.go
+++ b/cmd/garm-cli/config/home.go
@@ -15,19 +15,19 @@
package config
import (
+ "errors"
+ "fmt"
"os"
-
- "github.com/pkg/errors"
)
func ensureHomeDir(folder string) error {
if _, err := os.Stat(folder); err != nil {
if !errors.Is(err, os.ErrNotExist) {
- return errors.Wrap(err, "checking home dir")
+ return fmt.Errorf("error checking home dir: %w", err)
}
if err := os.MkdirAll(folder, 0o710); err != nil {
- return errors.Wrapf(err, "creating %s", folder)
+ return fmt.Errorf("error creating %s: %w", folder, err)
}
}
diff --git a/cmd/garm-cli/config/home_nix.go b/cmd/garm-cli/config/home_nix.go
index 92c99bad..323f29d7 100644
--- a/cmd/garm-cli/config/home_nix.go
+++ b/cmd/garm-cli/config/home_nix.go
@@ -1,20 +1,31 @@
//go:build !windows
// +build !windows
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package config
import (
+ "fmt"
"os"
"path/filepath"
-
- "github.com/pkg/errors"
)
func getHomeDir() (string, error) {
home, err := os.UserHomeDir()
-
if err != nil {
- return "", errors.Wrap(err, "fetching home dir")
+ return "", fmt.Errorf("error fetching home dir: %w", err)
}
return filepath.Join(home, ".local", "share", DefaultAppFolder), nil
diff --git a/cmd/garm-cli/config/home_windows.go b/cmd/garm-cli/config/home_windows.go
index d34379b4..c70fb645 100644
--- a/cmd/garm-cli/config/home_windows.go
+++ b/cmd/garm-cli/config/home_windows.go
@@ -1,6 +1,19 @@
//go:build windows && !linux
// +build windows,!linux
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package config
import (
diff --git a/cmd/garm/main.go b/cmd/garm/main.go
index 79a41092..cba3a064 100644
--- a/cmd/garm/main.go
+++ b/cmd/garm/main.go
@@ -18,29 +18,40 @@ import (
"context"
"flag"
"fmt"
- "io"
"log"
+ "log/slog"
"net"
"net/http"
"os"
"os/signal"
+ "runtime"
+ "syscall"
"time"
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+
+ "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/apiserver/controllers"
"github.com/cloudbase/garm/apiserver/routers"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
"github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/locking"
"github.com/cloudbase/garm/metrics"
- "github.com/cloudbase/garm/runner"
- "github.com/cloudbase/garm/util"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+ runnerMetrics "github.com/cloudbase/garm/runner/metrics"
+ "github.com/cloudbase/garm/runner/providers"
+ garmUtil "github.com/cloudbase/garm/util"
"github.com/cloudbase/garm/util/appdefaults"
"github.com/cloudbase/garm/websocket"
-
- "github.com/gorilla/handlers"
- "github.com/gorilla/mux"
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm/workers/cache"
+ "github.com/cloudbase/garm/workers/entity"
+ "github.com/cloudbase/garm/workers/provider"
)
var (
@@ -48,66 +59,217 @@ var (
version = flag.Bool("version", false, "prints version")
)
-var Version string
-
-func maybeInitController(db common.Store) error {
- if _, err := db.ControllerInfo(); err == nil {
- return nil
- }
-
- if _, err := db.InitController(); err != nil {
- return errors.Wrap(err, "initializing controller")
- }
-
- return nil
+var signals = []os.Signal{
+ os.Interrupt,
+ syscall.SIGTERM,
}
-func main() {
- flag.Parse()
- if *version {
- fmt.Println(Version)
- return
+func maybeInitController(db common.Store) (params.ControllerInfo, error) {
+ if info, err := db.ControllerInfo(); err == nil {
+ return info, nil
}
- ctx, stop := signal.NotifyContext(context.Background(), signals...)
- defer stop()
- fmt.Println(ctx)
- cfg, err := config.NewConfig(*conf)
+ info, err := db.InitController()
if err != nil {
- log.Fatalf("Fetching config: %+v", err)
+ return params.ControllerInfo{}, fmt.Errorf("error initializing controller: %w", err)
}
- logWriter, err := util.GetLoggingWriter(cfg)
+ return info, nil
+}
+
+func setupLogging(ctx context.Context, logCfg config.Logging, hub *websocket.Hub) {
+ logWriter, err := util.GetLoggingWriter(logCfg.LogFile)
if err != nil {
log.Fatalf("fetching log writer: %+v", err)
}
- var writers []io.Writer = []io.Writer{
- logWriter,
+ // rotate log file on SIGHUP
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGHUP)
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ // Daemon is exiting.
+ return
+ case <-ch:
+ // we got a SIGHUP. Rotate log file.
+ if logger, ok := logWriter.(*lumberjack.Logger); ok {
+ if err := logger.Rotate(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to rotate log file")
+ }
+ }
+ }
+ }
+ }()
+
+ var logLevel slog.Level
+ switch logCfg.LogLevel {
+ case config.LevelDebug:
+ logLevel = slog.LevelDebug
+ case config.LevelInfo:
+ logLevel = slog.LevelInfo
+ case config.LevelWarn:
+ logLevel = slog.LevelWarn
+ case config.LevelError:
+ logLevel = slog.LevelError
+ default:
+ logLevel = slog.LevelInfo
}
+
+ // logger options
+ opts := slog.HandlerOptions{
+ AddSource: logCfg.LogSource,
+ Level: logLevel,
+ }
+
+ var fileHan slog.Handler
+ switch logCfg.LogFormat {
+ case config.FormatJSON:
+ fileHan = slog.NewJSONHandler(logWriter, &opts)
+ default:
+ fileHan = slog.NewTextHandler(logWriter, &opts)
+ }
+
+ handlers := []slog.Handler{
+ fileHan,
+ }
+
+ if hub != nil {
+ wsHan := slog.NewJSONHandler(hub, &opts)
+ handlers = append(handlers, wsHan)
+ }
+
+ wrapped := &garmUtil.SlogMultiHandler{
+ Handlers: handlers,
+ }
+ slog.SetDefault(slog.New(wrapped))
+}
+
+func maybeUpdateURLsFromConfig(cfg config.Config, store common.Store) error {
+ info, err := store.ControllerInfo()
+ if err != nil {
+ return fmt.Errorf("error fetching controller info: %w", err)
+ }
+
+ var updateParams params.UpdateControllerParams
+
+ if info.MetadataURL == "" && cfg.Default.MetadataURL != "" {
+ updateParams.MetadataURL = &cfg.Default.MetadataURL
+ }
+
+ if info.CallbackURL == "" && cfg.Default.CallbackURL != "" {
+ updateParams.CallbackURL = &cfg.Default.CallbackURL
+ }
+
+ if info.WebhookURL == "" && cfg.Default.WebhookURL != "" {
+ updateParams.WebhookURL = &cfg.Default.WebhookURL
+ }
+
+ if updateParams.MetadataURL == nil && updateParams.CallbackURL == nil && updateParams.WebhookURL == nil {
+ // nothing to update
+ return nil
+ }
+
+ _, err = store.UpdateController(updateParams)
+ if err != nil {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ return nil
+}
+
+//gocyclo:ignore
+func main() {
+ flag.Parse()
+ if *version {
+ fmt.Println(appdefaults.GetVersion())
+ return
+ }
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+ watcher.InitWatcher(ctx)
+
+ ctx = auth.GetAdminContext(ctx)
+
+ cfg, err := config.NewConfig(*conf)
+ if err != nil {
+ log.Fatalf("Fetching config: %+v", err) //nolint:gocritic
+ }
+
+ logCfg := cfg.GetLoggingConfig()
var hub *websocket.Hub
- if cfg.Default.EnableLogStreamer {
+ if logCfg.EnableLogStreamer != nil && *logCfg.EnableLogStreamer {
hub = websocket.NewHub(ctx)
if err := hub.Start(); err != nil {
log.Fatal(err)
}
defer hub.Stop() //nolint
- writers = append(writers, hub)
}
+ setupLogging(ctx, logCfg, hub)
- multiWriter := io.MultiWriter(writers...)
- log.SetOutput(multiWriter)
-
+ // Migrate credentials to the new format. This field will be read
+ // by the DB migration logic.
+ cfg.Database.MigrateCredentials = cfg.Github
db, err := database.NewDatabase(ctx, cfg.Database)
if err != nil {
log.Fatal(err)
}
- if err := maybeInitController(db); err != nil {
+ controllerInfo, err := maybeInitController(db)
+ if err != nil {
log.Fatal(err)
}
- runner, err := runner.NewRunner(ctx, *cfg)
+ // Local locker for now. Will be configurable in the future,
+ // as we add scale-out capability to GARM.
+ lock, err := locking.NewLocalLocker(ctx, db)
+ if err != nil {
+ log.Fatalf("failed to create locker: %q", err)
+ }
+
+ if err := locking.RegisterLocker(lock); err != nil {
+ log.Fatalf("failed to register locker: %q", err)
+ }
+
+ if err := maybeUpdateURLsFromConfig(*cfg, db); err != nil {
+ log.Fatal(err)
+ }
+
+ cacheWorker := cache.NewWorker(ctx, db)
+ // NOTE(review): cache.NewWorker returns no error value; the previous
+ // "if err != nil" check here re-tested the stale error from
+ // locking.NewLocalLocker above (already fatal-checked) and was dead code.
+ if err := cacheWorker.Start(); err != nil {
+ log.Fatalf("failed to start cache worker: %+v", err)
+ }
+
+ providers, err := providers.LoadProvidersFromConfig(ctx, *cfg, controllerInfo.ControllerID.String())
+ if err != nil {
+ log.Fatalf("loading providers: %+v", err)
+ }
+
+ entityController, err := entity.NewController(ctx, db, providers)
+ if err != nil {
+ log.Fatalf("failed to create entity controller: %+v", err)
+ }
+ if err := entityController.Start(); err != nil {
+ log.Fatalf("failed to start entity controller: %+v", err)
+ }
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(cfg.JWTAuth.Secret)
+ if err != nil {
+ log.Fatalf("failed to create instance token getter: %+v", err)
+ }
+
+ providerWorker, err := provider.NewWorker(ctx, db, providers, instanceTokenGetter)
+ if err != nil {
+ log.Fatalf("failed to create provider worker: %+v", err)
+ }
+ if err := providerWorker.Start(); err != nil {
+ log.Fatalf("failed to start provider worker: %+v", err)
+ }
+
+ runner, err := runner.NewRunner(ctx, *cfg, db)
if err != nil {
log.Fatalf("failed to create controller: %+v", err)
}
@@ -118,7 +280,7 @@ func main() {
}
authenticator := auth.NewAuthenticator(cfg.JWTAuth, db)
- controller, err := controllers.NewAPIController(runner, authenticator, hub)
+ controller, err := controllers.NewAPIController(runner, authenticator, hub, cfg.APIServer)
if err != nil {
log.Fatalf("failed to create controller: %+v", err)
}
@@ -138,20 +300,40 @@ func main() {
log.Fatal(err)
}
+ urlsRequiredMiddleware, err := auth.NewUrlsRequiredMiddleware(db)
+ if err != nil {
+ log.Fatal(err)
+ }
+
metricsMiddleware, err := auth.NewMetricsMiddleware(cfg.JWTAuth)
if err != nil {
log.Fatal(err)
}
- router := routers.NewAPIRouter(controller, multiWriter, jwtMiddleware, initMiddleware, instanceMiddleware)
+ router := routers.NewAPIRouter(controller, jwtMiddleware, initMiddleware, urlsRequiredMiddleware, instanceMiddleware, cfg.Default.EnableWebhookManagement)
+ // Add WebUI routes
+ router = routers.WithWebUI(router, cfg.APIServer)
+
+ // start the metrics collector
if cfg.Metrics.Enable {
- log.Printf("registering prometheus metrics collectors")
- if err := metrics.RegisterCollectors(runner); err != nil {
+ slog.InfoContext(ctx, "setting up metric routes")
+ router = routers.WithMetricsRouter(router, cfg.Metrics.DisableAuth, metricsMiddleware)
+
+ slog.InfoContext(ctx, "register metrics")
+ if err := metrics.RegisterMetrics(); err != nil {
log.Fatal(err)
}
- log.Printf("setting up metric routes")
- router = routers.WithMetricsRouter(router, cfg.Metrics.DisableAuth, metricsMiddleware)
+
+ slog.InfoContext(ctx, "start metrics collection")
+ runnerMetrics.CollectObjectMetric(ctx, runner, cfg.Metrics.Duration())
+ }
+
+ if cfg.Default.DebugServer {
+ runtime.SetBlockProfileRate(1)
+ runtime.SetMutexProfileFraction(1)
+ slog.InfoContext(ctx, "setting up debug routes")
+ router = routers.WithDebugServer(router)
}
corsMw := mux.CORSMethodMiddleware(router)
@@ -161,6 +343,8 @@ func main() {
methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS", "DELETE"})
headersOk := handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "Authorization"})
+ //nolint:golangci-lint,gosec
+ // G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server
srv := &http.Server{
Addr: cfg.APIServer.BindAddress(),
// Pass our instance of gorilla/mux in.
@@ -175,25 +359,41 @@ func main() {
go func() {
if cfg.APIServer.UseTLS {
if err := srv.ServeTLS(listener, cfg.APIServer.TLSConfig.CRT, cfg.APIServer.TLSConfig.Key); err != nil {
- log.Printf("Listening: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "Listening")
}
} else {
if err := srv.Serve(listener); err != http.ErrServerClosed {
- log.Printf("Listening: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "Listening")
}
}
}()
<-ctx.Done()
+
+ slog.InfoContext(ctx, "shutting down http server")
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 60*time.Second)
defer shutdownCancel()
if err := srv.Shutdown(shutdownCtx); err != nil {
- log.Printf("graceful api server shutdown failed: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "graceful api server shutdown failed")
}
- log.Printf("waiting for runner to stop")
+ if err := cacheWorker.Stop(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop credentials worker")
+ }
+
+ slog.InfoContext(ctx, "shutting down entity controller")
+ if err := entityController.Stop(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop entity controller")
+ }
+
+ slog.InfoContext(ctx, "shutting down provider worker")
+ if err := providerWorker.Stop(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to stop provider worker")
+ }
+
+ slog.InfoContext(ctx, "waiting for runner to stop")
if err := runner.Wait(); err != nil {
- log.Printf("failed to shutdown workers: %+v", err)
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to shutdown workers")
os.Exit(1)
}
}
diff --git a/cmd/garm/signal_nix.go b/cmd/garm/signal_nix.go
deleted file mode 100644
index 152b2d96..00000000
--- a/cmd/garm/signal_nix.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package main
-
-import (
- "os"
- "syscall"
-)
-
-var signals = []os.Signal{
- os.Interrupt,
- syscall.SIGTERM,
-}
diff --git a/cmd/garm/signal_windows.go b/cmd/garm/signal_windows.go
deleted file mode 100644
index b424d6dd..00000000
--- a/cmd/garm/signal_windows.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build windows && !linux
-// +build windows,!linux
-
-package main
-
-import "os"
-
-var signals = []os.Signal{
- os.Interrupt,
-}
diff --git a/config/config.go b/config/config.go
index 2e7aee60..31a16ae2 100644
--- a/config/config.go
+++ b/config/config.go
@@ -15,43 +15,78 @@
package config
import (
+ "context"
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"fmt"
- "log"
+ "log/slog"
"net"
+ "net/http"
"net/url"
"os"
"path/filepath"
"time"
"github.com/BurntSushi/toml"
+ "github.com/bradleyfalzon/ghinstallation/v2"
+ zxcvbn "github.com/nbutton23/zxcvbn-go"
+ "golang.org/x/oauth2"
+
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/util/appdefaults"
- zxcvbn "github.com/nbutton23/zxcvbn-go"
- "github.com/pkg/errors"
)
-type DBBackendType string
+type (
+ DBBackendType string
+ LogLevel string
+ LogFormat string
+ GithubAuthType string
+)
const (
// MySQLBackend represents the MySQL DB backend
MySQLBackend DBBackendType = "mysql"
// SQLiteBackend represents the SQLite3 DB backend
SQLiteBackend DBBackendType = "sqlite3"
+ // EnvironmentVariablePrefix is the prefix for all environment variables
+ // that cannot be overwritten via the external provider
+ EnvironmentVariablePrefix = "GARM"
+)
+
+const (
+ // LevelDebug is the debug log level
+ LevelDebug LogLevel = "debug"
+ // LevelInfo is the info log level
+ LevelInfo LogLevel = "info"
+ // LevelWarn is the warn log level
+ LevelWarn LogLevel = "warn"
+ // LevelError is the error log level
+ LevelError LogLevel = "error"
+)
+
+const (
+ // FormatText is the text log format
+ FormatText LogFormat = "text"
+ // FormatJSON is the json log format
+ FormatJSON LogFormat = "json"
+)
+
+const (
+ // GithubAuthTypePAT is the OAuth token based authentication
+ GithubAuthTypePAT GithubAuthType = "pat"
+ // GithubAuthTypeApp is the GitHub App based authentication
+ GithubAuthTypeApp GithubAuthType = "app"
)
// NewConfig returns a new Config
func NewConfig(cfgFile string) (*Config, error) {
var config Config
if _, err := toml.DecodeFile(cfgFile, &config); err != nil {
- return nil, errors.Wrap(err, "decoding toml")
- }
- if config.Default.ConfigDir == "" {
- config.Default.ConfigDir = appdefaults.DefaultConfigDir
+ return nil, fmt.Errorf("error decoding toml: %w", err)
}
if err := config.Validate(); err != nil {
- return nil, errors.Wrap(err, "validating config")
+ return nil, fmt.Errorf("error validating config: %w", err)
}
return &config, nil
}
@@ -64,38 +99,43 @@ type Config struct {
Providers []Provider `toml:"provider,omitempty" json:"provider,omitempty"`
Github []Github `toml:"github,omitempty"`
JWTAuth JWTAuth `toml:"jwt_auth" json:"jwt-auth"`
+ Logging Logging `toml:"logging" json:"logging"`
}
// Validate validates the config
func (c *Config) Validate() error {
if err := c.APIServer.Validate(); err != nil {
- return errors.Wrap(err, "validating APIServer config")
+ return fmt.Errorf("error validating apiserver config: %w", err)
}
if err := c.Database.Validate(); err != nil {
- return errors.Wrap(err, "validating database config")
+ return fmt.Errorf("error validating database config: %w", err)
}
if err := c.Default.Validate(); err != nil {
- return errors.Wrap(err, "validating default section")
+ return fmt.Errorf("error validating default config: %w", err)
}
for _, gh := range c.Github {
if err := gh.Validate(); err != nil {
- return errors.Wrap(err, "validating github config")
+ return fmt.Errorf("error validating github config: %w", err)
}
}
if err := c.JWTAuth.Validate(); err != nil {
- return errors.Wrap(err, "validating jwt config")
+ return fmt.Errorf("error validating jwt_auth config: %w", err)
+ }
+
+ if err := c.Logging.Validate(); err != nil {
+ return fmt.Errorf("error validating logging config: %w", err)
}
providerNames := map[string]int{}
for _, provider := range c.Providers {
if err := provider.Validate(); err != nil {
- return errors.Wrap(err, "validating provider")
+ return fmt.Errorf("error validating provider %s: %w", provider.Name, err)
}
- providerNames[provider.Name] += 1
+ providerNames[provider.Name]++
}
for name, count := range providerNames {
@@ -107,43 +147,133 @@ func (c *Config) Validate() error {
return nil
}
+func (c *Config) GetLoggingConfig() Logging {
+ logging := c.Logging
+ if logging.LogFormat == "" {
+ logging.LogFormat = FormatText
+ }
+
+ if logging.LogLevel == "" {
+ logging.LogLevel = LevelInfo
+ }
+
+ // maintain backwards compatibility
+ if logging.LogFile == "" && c.Default.LogFile != "" {
+ logging.LogFile = c.Default.LogFile
+ }
+ if logging.EnableLogStreamer == nil && c.Default.EnableLogStreamer != nil {
+ logging.EnableLogStreamer = c.Default.EnableLogStreamer
+ }
+
+ return logging
+}
+
+type Logging struct {
+ // LogFile is the location of the log file.
+ LogFile string `toml:"log_file,omitempty" json:"log-file"`
+ // EnableLogStreamer enables the log streamer over websockets.
+ EnableLogStreamer *bool `toml:"enable_log_streamer,omitempty" json:"enable-log-streamer,omitempty"`
+ // LogLevel is the log level.
+ LogLevel LogLevel `toml:"log_level" json:"log-level"`
+ // LogFormat is the log format.
+ LogFormat LogFormat `toml:"log_format" json:"log-format"`
+ // LogSource enables the log source.
+ LogSource bool `toml:"log_source" json:"log-source"`
+}
+
+func (l *Logging) Validate() error {
+ if l.LogLevel != LevelDebug && l.LogLevel != LevelInfo && l.LogLevel != LevelWarn && l.LogLevel != LevelError && l.LogLevel != "" {
+ return fmt.Errorf("invalid log level: %s", l.LogLevel)
+ }
+
+ if l.LogFormat != FormatText && l.LogFormat != FormatJSON && l.LogFormat != "" {
+ return fmt.Errorf("invalid log format: %s", l.LogFormat)
+ }
+
+ return nil
+}
+
type Default struct {
- // ConfigDir is the folder where the runner may save any aditional files
- // or configurations it may need. Things like auto-generated SSH keys that
- // may be used to access the runner instances.
- ConfigDir string `toml:"config_dir,omitempty" json:"config-dir,omitempty"`
// CallbackURL is the URL where the instances can send back status reports.
CallbackURL string `toml:"callback_url" json:"callback-url"`
// MetadataURL is the URL where instances can fetch information they may need
// to set themselves up.
MetadataURL string `toml:"metadata_url" json:"metadata-url"`
+ // WebhookURL is the URL that will be installed as a webhook target in github.
+ WebhookURL string `toml:"webhook_url" json:"webhook-url"`
+ // EnableWebhookManagement enables the webhook management API.
+ EnableWebhookManagement bool `toml:"enable_webhook_management" json:"enable-webhook-management"`
+
// LogFile is the location of the log file.
LogFile string `toml:"log_file,omitempty" json:"log-file"`
- EnableLogStreamer bool `toml:"enable_log_streamer"`
+ EnableLogStreamer *bool `toml:"enable_log_streamer,omitempty" json:"enable-log-streamer,omitempty"`
+ DebugServer bool `toml:"debug_server" json:"debug-server"`
}
func (d *Default) Validate() error {
- if d.CallbackURL == "" {
- return fmt.Errorf("missing callback_url")
+ if d.CallbackURL != "" {
+ _, err := url.ParseRequestURI(d.CallbackURL)
+ if err != nil {
+ return fmt.Errorf("invalid callback_url: %w", err)
+ }
}
- _, err := url.Parse(d.CallbackURL)
+
+ if d.MetadataURL != "" {
+ if _, err := url.ParseRequestURI(d.MetadataURL); err != nil {
+ return fmt.Errorf("invalid metadata_url: %w", err)
+ }
+ }
+
+ if d.WebhookURL != "" {
+ if _, err := url.ParseRequestURI(d.WebhookURL); err != nil {
+ return fmt.Errorf("invalid webhook_url: %w", err)
+ }
+ }
+ return nil
+}
+
+type GithubPAT struct {
+ OAuth2Token string `toml:"oauth2_token" json:"oauth2-token"`
+}
+
+type GithubApp struct {
+ AppID int64 `toml:"app_id" json:"app-id"`
+ PrivateKeyPath string `toml:"private_key_path" json:"private-key-path"`
+ InstallationID int64 `toml:"installation_id" json:"installation-id"`
+}
+
+func (a *GithubApp) PrivateKeyBytes() ([]byte, error) {
+ keyBytes, err := os.ReadFile(a.PrivateKeyPath)
if err != nil {
- return errors.Wrap(err, "validating callback_url")
+ return nil, fmt.Errorf("reading private_key_path: %w", err)
+ }
+ return keyBytes, nil
+}
+
+func (a *GithubApp) Validate() error {
+ if a.AppID == 0 {
+ return fmt.Errorf("missing app_id")
+ }
+ if a.PrivateKeyPath == "" {
+ return fmt.Errorf("missing private_key_path")
+ }
+ if a.InstallationID == 0 {
+ return fmt.Errorf("missing installation_id")
}
- if d.MetadataURL == "" {
- return fmt.Errorf("missing metadata-url")
+ if _, err := os.Stat(a.PrivateKeyPath); err != nil {
+ return fmt.Errorf("error accessing private_key_path: %w", err)
}
- if _, err := url.Parse(d.MetadataURL); err != nil {
- return errors.Wrap(err, "validating metadata_url")
+ // Read the private key as bytes
+ keyBytes, err := os.ReadFile(a.PrivateKeyPath)
+ if err != nil {
+ return fmt.Errorf("reading private_key_path: %w", err)
}
-
- if d.ConfigDir == "" {
- return fmt.Errorf("config_dir cannot be empty")
- }
-
- if _, err := os.Stat(d.ConfigDir); err != nil {
- return errors.Wrap(err, "accessing config dir")
+ block, _ := pem.Decode(keyBytes)
+ // pem.Decode returns a nil block when the input contains no PEM data;
+ // guard before dereferencing block.Bytes to avoid a panic on bogus files.
+ if block == nil {
+ return fmt.Errorf("parsing private_key_path: no PEM block found")
+ }
+ // Parse the private key as PKCS1
+ _, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf("parsing private_key_path: %w", err)
}
return nil
@@ -152,8 +282,11 @@ func (d *Default) Validate() error {
// Github hold configuration options specific to interacting with github.
// Currently that is just a OAuth2 personal token.
type Github struct {
- Name string `toml:"name" json:"name"`
- Description string `toml:"description" json:"description"`
+ Name string `toml:"name" json:"name"`
+ Description string `toml:"description" json:"description"`
+ // OAuth2Token is the personal access token used to authenticate with the
+ // github API. This is deprecated and will be removed in the future.
+ // Use the PAT section instead.
OAuth2Token string `toml:"oauth2_token" json:"oauth2-token"`
APIBaseURL string `toml:"api_base_url" json:"api-base-url"`
UploadBaseURL string `toml:"upload_base_url" json:"upload-base-url"`
@@ -161,7 +294,17 @@ type Github struct {
// CACertBundlePath is the path on disk to a CA certificate bundle that
// can validate the endpoints defined above. Leave empty if not using a
// self signed certificate.
- CACertBundlePath string `toml:"ca_cert_bundle" json:"ca-cert-bundle"`
+ CACertBundlePath string `toml:"ca_cert_bundle" json:"ca-cert-bundle"`
+ AuthType GithubAuthType `toml:"auth_type" json:"auth-type"`
+ PAT GithubPAT `toml:"pat" json:"pat"`
+ App GithubApp `toml:"app" json:"app"`
+}
+
+func (g *Github) GetAuthType() GithubAuthType {
+ if g.AuthType == "" {
+ return GithubAuthTypePAT
+ }
+ return g.AuthType
}
func (g *Github) APIEndpoint() string {
@@ -177,12 +320,12 @@ func (g *Github) CACertBundle() ([]byte, error) {
return nil, nil
}
if _, err := os.Stat(g.CACertBundlePath); err != nil {
- return nil, errors.Wrap(err, "accessing CA bundle")
+ return nil, fmt.Errorf("error accessing ca_cert_bundle: %w", err)
}
contents, err := os.ReadFile(g.CACertBundlePath)
if err != nil {
- return nil, errors.Wrap(err, "reading CA bundle")
+ return nil, fmt.Errorf("reading ca_cert_bundle: %w", err)
}
roots := x509.NewCertPool()
@@ -211,21 +354,107 @@ func (g *Github) BaseEndpoint() string {
}
func (g *Github) Validate() error {
- if g.OAuth2Token == "" {
- return fmt.Errorf("missing github oauth2 token")
+ if g.Name == "" {
+ return fmt.Errorf("missing credentials name")
+ }
+
+ if g.APIBaseURL != "" {
+ if _, err := url.ParseRequestURI(g.APIBaseURL); err != nil {
+ return fmt.Errorf("invalid api_base_url: %w", err)
+ }
+ }
+
+ if g.UploadBaseURL != "" {
+ if _, err := url.ParseRequestURI(g.UploadBaseURL); err != nil {
+ return fmt.Errorf("invalid upload_base_url: %w", err)
+ }
+ }
+
+ if g.BaseURL != "" {
+ if _, err := url.ParseRequestURI(g.BaseURL); err != nil {
+ return fmt.Errorf("invalid base_url: %w", err)
+ }
+ }
+
+ switch g.AuthType {
+ case GithubAuthTypeApp:
+ if err := g.App.Validate(); err != nil {
+ return fmt.Errorf("invalid github app config: %w", err)
+ }
+ default:
+ if g.OAuth2Token == "" && g.PAT.OAuth2Token == "" {
+ return fmt.Errorf("missing github oauth2 token")
+ }
+ if g.OAuth2Token != "" {
+ slog.Warn("the github.oauth2_token option is deprecated, please use the PAT section")
+ }
}
return nil
}
+func (g *Github) HTTPClient(ctx context.Context) (*http.Client, error) {
+ if err := g.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid github config: %w", err)
+ }
+ var roots *x509.CertPool
+ caBundle, err := g.CACertBundle()
+ if err != nil {
+ return nil, fmt.Errorf("fetching CA cert bundle: %w", err)
+ }
+ if caBundle != nil {
+ roots = x509.NewCertPool()
+ ok := roots.AppendCertsFromPEM(caBundle)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse CA cert")
+ }
+ }
+ // nolint:golangci-lint,gosec,godox
+ // TODO: set TLS MinVersion
+ httpTransport := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: roots,
+ },
+ }
+
+ var tc *http.Client
+ switch g.AuthType {
+ case GithubAuthTypeApp:
+ itr, err := ghinstallation.NewKeyFromFile(httpTransport, g.App.AppID, g.App.InstallationID, g.App.PrivateKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create github app installation transport: %w", err)
+ }
+
+ tc = &http.Client{Transport: itr}
+ default:
+ httpClient := &http.Client{Transport: httpTransport}
+ ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+
+ token := g.PAT.OAuth2Token
+ if token == "" {
+ token = g.OAuth2Token
+ }
+
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: token},
+ )
+ tc = oauth2.NewClient(ctx, ts)
+ }
+
+ return tc, nil
+}
+
// Provider holds access information for a particular provider.
// A provider offers compute resources on which we spin up self hosted runners.
type Provider struct {
Name string `toml:"name" json:"name"`
ProviderType params.ProviderType `toml:"provider_type" json:"provider-type"`
Description string `toml:"description" json:"description"`
- LXD LXD `toml:"lxd" json:"lxd"`
- External External `toml:"external" json:"external"`
+ // DisableJITConfig explicitly disables JIT configuration and forces runner registration
+ // tokens to be used. This may happen if a provider has not yet been updated to support
+ // JIT configuration.
+ DisableJITConfig bool `toml:"disable_jit_config" json:"disable-jit-config"`
+ External External `toml:"external" json:"external"`
}
func (p *Provider) Validate() error {
@@ -234,13 +463,9 @@ func (p *Provider) Validate() error {
}
switch p.ProviderType {
- case params.LXDProvider:
- if err := p.LXD.Validate(); err != nil {
- return errors.Wrap(err, "validating LXD provider info")
- }
case params.ExternalProvider:
if err := p.External.Validate(); err != nil {
- return errors.Wrap(err, "validating external provider info")
+ return fmt.Errorf("invalid external provider config: %w", err)
}
default:
return fmt.Errorf("unknown provider type: %s", p.ProviderType)
@@ -260,24 +485,29 @@ type Database struct {
// Don't lose or change this. It will invalidate all encrypted data
// in the DB. This field must be set and must be exactly 32 characters.
Passphrase string `toml:"passphrase"`
+
+ // MigrateCredentials is a list of github credentials that need to be migrated
+ // from the config file to the database. This field will be removed once GARM
+ // reaches version 0.2.x. It's only meant to be used for the migration process.
+ MigrateCredentials []Github `toml:"-"`
}
// GormParams returns the database type and connection URI
func (d *Database) GormParams() (dbType DBBackendType, uri string, err error) {
if err := d.Validate(); err != nil {
- return "", "", errors.Wrap(err, "validating database config")
+ return "", "", fmt.Errorf("error validating database config: %w", err)
}
dbType = d.DbBackend
switch dbType {
case MySQLBackend:
uri, err = d.MySQL.ConnectionString()
if err != nil {
- return "", "", errors.Wrap(err, "fetching mysql connection string")
+ return "", "", fmt.Errorf("error fetching mysql connection string: %w", err)
}
case SQLiteBackend:
uri, err = d.SQLite.ConnectionString()
if err != nil {
- return "", "", errors.Wrap(err, "fetching sqlite3 connection string")
+ return "", "", fmt.Errorf("error fetching sqlite3 connection string: %w", err)
}
default:
return "", "", fmt.Errorf("invalid database backend: %s", dbType)
@@ -303,11 +533,11 @@ func (d *Database) Validate() error {
switch d.DbBackend {
case MySQLBackend:
if err := d.MySQL.Validate(); err != nil {
- return errors.Wrap(err, "validating mysql config")
+ return fmt.Errorf("validating mysql config: %w", err)
}
case SQLiteBackend:
if err := d.SQLite.Validate(); err != nil {
- return errors.Wrap(err, "validating sqlite3 config")
+ return fmt.Errorf("validating sqlite3 config: %w", err)
}
default:
return fmt.Errorf("invalid database backend: %s", d.DbBackend)
@@ -317,7 +547,8 @@ func (d *Database) Validate() error {
// SQLite is the config entry for the sqlite3 section
type SQLite struct {
- DBFile string `toml:"db_file" json:"db-file"`
+ DBFile string `toml:"db_file" json:"db-file"`
+ BusyTimeoutSeconds int `toml:"busy_timeout_seconds" json:"busy-timeout-seconds"`
}
func (s *SQLite) Validate() error {
@@ -331,13 +562,18 @@ func (s *SQLite) Validate() error {
parent := filepath.Dir(s.DBFile)
if _, err := os.Stat(parent); err != nil {
- return errors.Wrapf(err, "accessing db_file parent dir: %s", parent)
+ return fmt.Errorf("parent directory of db_file does not exist: %w", err)
}
return nil
}
func (s *SQLite) ConnectionString() (string, error) {
- return s.DBFile, nil
+ connectionString := fmt.Sprintf("%s?_journal_mode=WAL&_foreign_keys=ON", s.DBFile)
+ if s.BusyTimeoutSeconds > 0 {
+ timeout := s.BusyTimeoutSeconds * 1000
+ connectionString = fmt.Sprintf("%s&_busy_timeout=%d", connectionString, timeout)
+ }
+ return connectionString, nil
}
// MySQL is the config entry for the mysql section
@@ -391,8 +627,54 @@ func (t *TLSConfig) Validate() error {
}
type Metrics struct {
+ // DisableAuth defines if the API endpoint will be protected by
+ // JWT authentication
DisableAuth bool `toml:"disable_auth" json:"disable-auth"`
- Enable bool `toml:"enable" json:"enable"`
+ // Enable define if the API endpoint for metrics collection will
+ // be enabled
+ Enable bool `toml:"enable" json:"enable"`
+ // Period defines the internal period at which internal metrics are getting updated
+ // and propagated to the /metrics endpoint
+ Period time.Duration `toml:"period" json:"period"`
+}
+
+// ParseDuration parses the configured metrics update period and returns it as
+// a time.Duration, or a zero duration and an error if the value is invalid.
+func (m *Metrics) ParseDuration() (time.Duration, error) {
+ duration, err := time.ParseDuration(fmt.Sprint(m.Period))
+ if err != nil {
+ return 0, err
+ }
+ return duration, nil
+}
+
+// Duration returns the configured duration or the default duration if no value
+// is configured or the configured value is invalid.
+func (m *Metrics) Duration() time.Duration {
+ duration, err := m.ParseDuration()
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error(fmt.Sprintf("defined duration %s is invalid", m.Period))
+ }
+ if duration == 0 {
+ slog.Debug(fmt.Sprintf("using default duration %s for metrics update interval", appdefaults.DefaultMetricsUpdateInterval))
+ return appdefaults.DefaultMetricsUpdateInterval
+ }
+ return duration
+}
+
+// WebUI holds configuration for the web UI
+type WebUI struct {
+ EnableWebUI bool `toml:"enable" json:"enable"`
+}
+
+// Validate validates the WebUI config
+func (w *WebUI) Validate() error {
+ return nil
+}
+
+// GetWebappPath returns the webapp path with proper formatting
+func (w *WebUI) GetWebappPath() string {
+ return "/ui/"
}
// APIServer holds configuration for the API server
@@ -403,6 +685,7 @@ type APIServer struct {
UseTLS bool `toml:"use_tls" json:"use-tls"`
TLSConfig TLSConfig `toml:"tls" json:"tls"`
CORSOrigins []string `toml:"cors_origins" json:"cors-origins"`
+ WebUI WebUI `toml:"webui" json:"webui"`
}
// BindAddress returns a host:port string.
@@ -414,7 +697,7 @@ func (a *APIServer) BindAddress() string {
func (a *APIServer) Validate() error {
if a.UseTLS {
if err := a.TLSConfig.Validate(); err != nil {
- return errors.Wrap(err, "TLS validation failed")
+ return fmt.Errorf("invalid tls config: %w", err)
}
}
if a.Port > 65535 || a.Port < 1 {
@@ -428,6 +711,11 @@ func (a *APIServer) Validate() error {
// when we try to bind to it.
return fmt.Errorf("invalid IP address")
}
+
+ if err := a.WebUI.Validate(); err != nil {
+ return fmt.Errorf("invalid webui config: %w", err)
+ }
+
return nil
}
@@ -444,9 +732,10 @@ func (d *timeToLive) ParseDuration() (time.Duration, error) {
func (d *timeToLive) Duration() time.Duration {
duration, err := d.ParseDuration()
if err != nil {
- log.Printf("failed to parse duration: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to parse duration")
return appdefaults.DefaultJWTTTL
}
+ // nolint:golangci-lint,godox
// TODO(gabriel-samfira): should we have a minimum TTL?
if duration < appdefaults.DefaultJWTTTL {
return appdefaults.DefaultJWTTTL
@@ -458,7 +747,7 @@ func (d *timeToLive) Duration() time.Duration {
func (d *timeToLive) UnmarshalText(text []byte) error {
_, err := time.ParseDuration(string(text))
if err != nil {
- return errors.Wrap(err, "parsing time_to_live")
+ return fmt.Errorf("invalid duration: %w", err)
}
*d = timeToLive(text)
@@ -474,7 +763,7 @@ type JWTAuth struct {
// Validate validates the JWTAuth config
func (j *JWTAuth) Validate() error {
if _, err := j.TimeToLive.ParseDuration(); err != nil {
- return errors.Wrap(err, "parsing duration")
+ return fmt.Errorf("invalid time_to_live: %w", err)
}
if j.Secret == "" {
diff --git a/config/config_test.go b/config/config_test.go
index ac13ce7f..bbf9e299 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -15,24 +15,27 @@
package config
import (
+ "context"
"os"
"path/filepath"
"testing"
"time"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util/appdefaults"
+ "github.com/bradleyfalzon/ghinstallation/v2"
"github.com/stretchr/testify/require"
+ "golang.org/x/oauth2"
+
+ "github.com/cloudbase/garm/util/appdefaults"
)
var (
+ // nolint: golangci-lint,gosec
EncryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
WeakEncryptionPassphrase = "1234567890abcdefghijklmnopqrstuv"
)
func getDefaultSectionConfig(configDir string) Default {
return Default{
- ConfigDir: configDir,
CallbackURL: "https://garm.example.com/",
MetadataURL: "https://garm.example.com/api/v1/metadata",
LogFile: filepath.Join(configDir, "garm.log"),
@@ -77,15 +80,7 @@ func getDefaultDatabaseConfig(dir string) Database {
}
func getDefaultProvidersConfig() []Provider {
- lxdConfig := getDefaultLXDConfig()
- return []Provider{
- {
- Name: "test_lxd",
- ProviderType: params.LXDProvider,
- Description: "test LXD provider",
- LXD: lxdConfig,
- },
- }
+ return []Provider{}
}
func getDefaultGithubConfig() []Github {
@@ -148,40 +143,20 @@ func TestDefaultSectionConfig(t *testing.T) {
errString: "",
},
{
- name: "CallbackURL cannot be empty",
+ name: "CallbackURL must be valid if set",
cfg: Default{
- CallbackURL: "",
+ CallbackURL: "bogus_url",
MetadataURL: cfg.MetadataURL,
- ConfigDir: cfg.ConfigDir,
},
- errString: "missing callback_url",
+ errString: "invalid callback_url",
},
{
- name: "MetadataURL cannot be empty",
+ name: "MetadataURL must be valid if set",
cfg: Default{
CallbackURL: cfg.CallbackURL,
- MetadataURL: "",
- ConfigDir: cfg.ConfigDir,
+ MetadataURL: "bogus_url",
},
- errString: "missing metadata-url",
- },
- {
- name: "ConfigDir cannot be empty",
- cfg: Default{
- CallbackURL: cfg.CallbackURL,
- MetadataURL: cfg.MetadataURL,
- ConfigDir: "",
- },
- errString: "config_dir cannot be empty",
- },
- {
- name: "config_dir must exist and be accessible",
- cfg: Default{
- CallbackURL: cfg.CallbackURL,
- MetadataURL: cfg.MetadataURL,
- ConfigDir: "/i/do/not/exist",
- },
- errString: "accessing config dir: stat /i/do/not/exist:.*",
+ errString: "invalid metadata_url",
},
}
@@ -259,7 +234,7 @@ func TestValidateAPIServerConfig(t *testing.T) {
TLSConfig: TLSConfig{},
UseTLS: true,
},
- errString: "TLS validation failed:*",
+ errString: "invalid tls config: missing crt or key",
},
{
name: "Skip TLS config validation if UseTLS is false",
@@ -412,7 +387,13 @@ func TestGormParams(t *testing.T) {
dbType, uri, err := cfg.GormParams()
require.Nil(t, err)
require.Equal(t, SQLiteBackend, dbType)
- require.Equal(t, filepath.Join(dir, "garm.db"), uri)
+ require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON"), uri)
+
+ cfg.SQLite.BusyTimeoutSeconds = 5
+ dbType, uri, err = cfg.GormParams()
+ require.Nil(t, err)
+ require.Equal(t, SQLiteBackend, dbType)
+ require.Equal(t, filepath.Join(dir, "garm.db?_journal_mode=WAL&_foreign_keys=ON&_busy_timeout=5000"), uri)
cfg.DbBackend = MySQLBackend
cfg.MySQL = getMySQLDefaultConfig()
@@ -422,7 +403,6 @@ func TestGormParams(t *testing.T) {
require.Nil(t, err)
require.Equal(t, MySQLBackend, dbType)
require.Equal(t, "test:test@tcp(127.0.0.1)/garm?charset=utf8&parseTime=True&loc=Local&timeout=5s", uri)
-
}
func TestSQLiteConfig(t *testing.T) {
@@ -463,7 +443,7 @@ func TestSQLiteConfig(t *testing.T) {
cfg: SQLite{
DBFile: "/i/dont/exist/test.db",
},
- errString: "accessing db_file parent dir:.*no such file or directory",
+ errString: "parent directory of db_file does not exist: stat.*",
},
}
@@ -518,7 +498,7 @@ func TestJWTAuthConfig(t *testing.T) {
Secret: cfg.Secret,
TimeToLive: "bogus",
},
- errString: "parsing duration: time: invalid duration*",
+ errString: "invalid time_to_live: time: invalid duration*",
},
}
@@ -537,7 +517,6 @@ func TestJWTAuthConfig(t *testing.T) {
func TestTimeToLiveDuration(t *testing.T) {
cfg := JWTAuth{
- Secret: EncryptionPassphrase,
TimeToLive: "48h",
}
@@ -560,7 +539,6 @@ func TestNewConfig(t *testing.T) {
require.Nil(t, err)
require.NotNil(t, cfg)
require.Equal(t, "https://garm.example.com/", cfg.Default.CallbackURL)
- require.Equal(t, "./testdata", cfg.Default.ConfigDir)
require.Equal(t, "0.0.0.0", cfg.APIServer.Bind)
require.Equal(t, 9998, cfg.APIServer.Port)
require.Equal(t, false, cfg.APIServer.UseTLS)
@@ -574,31 +552,6 @@ func TestNewConfig(t *testing.T) {
require.Equal(t, timeToLive("48h"), cfg.JWTAuth.TimeToLive)
}
-func TestNewConfigEmptyConfigDir(t *testing.T) {
- dirPath, err := os.MkdirTemp("", "garm-config-test")
- if err != nil {
- t.Fatalf("failed to create temporary directory: %s", err)
- }
- defer os.RemoveAll(dirPath)
- appdefaults.DefaultConfigDir = dirPath
-
- cfg, err := NewConfig("testdata/test-empty-config-dir.toml")
- require.Nil(t, err)
- require.NotNil(t, cfg)
- require.Equal(t, cfg.Default.ConfigDir, dirPath)
- require.Equal(t, "https://garm.example.com/", cfg.Default.CallbackURL)
- require.Equal(t, "0.0.0.0", cfg.APIServer.Bind)
- require.Equal(t, 9998, cfg.APIServer.Port)
- require.Equal(t, false, cfg.APIServer.UseTLS)
- require.Equal(t, DBBackendType("mysql"), cfg.Database.DbBackend)
- require.Equal(t, "test", cfg.Database.MySQL.Username)
- require.Equal(t, "test", cfg.Database.MySQL.Password)
- require.Equal(t, "127.0.0.1", cfg.Database.MySQL.Hostname)
- require.Equal(t, "garm", cfg.Database.MySQL.DatabaseName)
- require.Equal(t, "bocyasicgatEtenOubwonIbsudNutDom", cfg.JWTAuth.Secret)
- require.Equal(t, timeToLive("48h"), cfg.JWTAuth.TimeToLive)
-}
-
func TestNewConfigInvalidTomlPath(t *testing.T) {
cfg, err := NewConfig("this is not a file path")
require.Nil(t, cfg)
@@ -612,3 +565,374 @@ func TestNewConfigInvalidConfig(t *testing.T) {
require.NotNil(t, err)
require.Regexp(t, "validating config", err.Error())
}
+
+func TestGithubConfig(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ tests := []struct {
+ name string
+ cfg Github
+ errString string
+ }{
+ {
+ name: "Config is valid",
+ cfg: cfg[0],
+ errString: "",
+ },
+ {
+ name: "BaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ BaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid base_url: parse.*",
+ },
+ {
+ name: "APIBaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ APIBaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid api_base_url: parse.*",
+ },
+ {
+ name: "UploadBaseURL is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ UploadBaseURL: "bogus",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ errString: "invalid upload_base_url: parse.*",
+ },
+ {
+ name: "BaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ BaseURL: "https://github.example.com/",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "APIBaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ APIBaseURL: "https://github.example.com/api/v3",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "UploadBaseURL is set and is valid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ UploadBaseURL: "https://github.example.com/uploads",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "OAuth2Token is empty",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ },
+ errString: "missing github oauth2 token",
+ },
+ {
+ name: "Name is empty",
+ cfg: Github{
+ Name: "",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ },
+ errString: "missing credentials name",
+ },
+ {
+ name: "OAuth token is set in the PAT section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ },
+ },
+ {
+ name: "OAuth token is empty in the PAT section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "",
+ },
+ },
+ errString: "missing github oauth2 token",
+ },
+ {
+ name: "Valid App section",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ },
+ {
+ name: "AppID is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 0,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ errString: "missing app_id",
+ },
+ {
+ name: "InstallationID is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 0,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ },
+ errString: "missing installation_id",
+ },
+ {
+ name: "PrivateKeyPath is missing",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "",
+ },
+ },
+ errString: "missing private_key_path",
+ },
+ {
+ name: "PrivateKeyPath is invalid",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "/i/dont/exist",
+ },
+ },
+ errString: "invalid github app config: error accessing private_key_path: stat /i/dont/exist: no such file or directory",
+ },
+ {
+ name: "PrivateKeyPath is not a valid RSA private key",
+ cfg: Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-pub.pem",
+ },
+ },
+ errString: "invalid github app config: parsing private_key_path: asn1: structure error:.*",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ err := tc.cfg.Validate()
+ if tc.errString == "" {
+ require.Nil(t, err)
+ } else {
+ require.NotNil(t, err)
+ require.Regexp(t, tc.errString, err.Error())
+ }
+ })
+ }
+}
+
+func TestGithubAPIEndpoint(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://api.github.com/", cfg[0].APIEndpoint())
+}
+
+func TestGithubAPIEndpointIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].APIBaseURL = "https://github.example.com/api/v3"
+
+ require.Equal(t, "https://github.example.com/api/v3", cfg[0].APIEndpoint())
+}
+
+func TestUploadEndpoint(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://uploads.github.com/", cfg[0].UploadEndpoint())
+}
+
+func TestUploadEndpointIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].UploadBaseURL = "https://github.example.com/uploads"
+
+ require.Equal(t, "https://github.example.com/uploads", cfg[0].UploadEndpoint())
+}
+
+func TestGithubBaseURL(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+
+ require.Equal(t, "https://github.com", cfg[0].BaseEndpoint())
+}
+
+func TestGithubBaseURLIsSet(t *testing.T) {
+ cfg := getDefaultGithubConfig()
+ cfg[0].BaseURL = "https://github.example.com"
+
+ require.Equal(t, "https://github.example.com", cfg[0].BaseEndpoint())
+}
+
+func TestCACertBundle(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "../testdata/certs/srv-pub.pem",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.Nil(t, err)
+ require.NotNil(t, cert)
+}
+
+func TestCACertBundleInvalidPath(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "/i/dont/exist",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.NotNil(t, err)
+ require.EqualError(t, err, "error accessing ca_cert_bundle: stat /i/dont/exist: no such file or directory")
+ require.Nil(t, cert)
+}
+
+func TestCACertBundleInvalidFile(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ CACertBundlePath: "../testdata/config.toml",
+ }
+
+ cert, err := cfg.CACertBundle()
+ require.NotNil(t, err)
+ require.EqualError(t, err, "failed to parse CA cert bundle")
+ require.Nil(t, cert)
+}
+
+func TestGithubHTTPClientDeprecatedPAT(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ OAuth2Token: "bogus",
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*oauth2.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
+
+func TestGithubHTTPClientPAT(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ AuthType: GithubAuthTypePAT,
+ PAT: GithubPAT{
+ OAuth2Token: "bogus",
+ },
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*oauth2.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
+
+func TestGithubHTTPClientApp(t *testing.T) {
+ cfg := Github{
+ Name: "dummy_creds",
+ Description: "dummy github credentials",
+ AuthType: GithubAuthTypeApp,
+ App: GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../testdata/certs/srv-key.pem",
+ },
+ }
+
+ client, err := cfg.HTTPClient(context.Background())
+ require.Nil(t, err)
+ require.NotNil(t, client)
+
+ transport, ok := client.Transport.(*ghinstallation.Transport)
+ require.True(t, ok)
+ require.NotNil(t, transport)
+}
diff --git a/config/external.go b/config/external.go
index c6195dcb..ca98bdfb 100644
--- a/config/external.go
+++ b/config/external.go
@@ -18,10 +18,9 @@ import (
"fmt"
"os"
"path/filepath"
+ "strings"
- "github.com/cloudbase/garm/util/exec"
-
- "github.com/pkg/errors"
+ "github.com/cloudbase/garm-provider-common/util/exec"
)
// External represents the config for an external provider.
@@ -30,6 +29,10 @@ import (
// whatever programming language you wish, while still remaining compatible
// with garm.
type External struct {
+ // InterfaceVersion is the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between
+ // the external provider and garm.
+ InterfaceVersion string `toml:"interface_version" json:"interface-version"`
// ConfigFile is the path on disk to a file which will be passed to
// the external binary as an environment variable: GARM_PROVIDER_CONFIG
// You can use this file for any configuration you need to do for the
@@ -42,6 +45,25 @@ type External struct {
// the provider. If specified, it will take precedence over the "garm-external-provider"
// executable in the ProviderDir.
ProviderExecutable string `toml:"provider_executable" json:"provider-executable"`
+	// EnvironmentVariables is a list of environment variable name prefixes; host
+	// variables matching a prefix are passed (with values) to the external binary.
+ EnvironmentVariables []string `toml:"environment_variables" json:"environment-variables"`
+}
+
+func (e *External) GetEnvironmentVariables() []string {
+ envVars := []string{}
+
+ for _, configuredEnvVars := range e.EnvironmentVariables {
+ // discover environment variables
+ for _, k := range os.Environ() {
+ variable := strings.SplitN(k, "=", 2)
+ if strings.HasPrefix(variable[0], configuredEnvVars) &&
+ !strings.HasPrefix(variable[0], EnvironmentVariablePrefix) {
+ envVars = append(envVars, k)
+ }
+ }
+ }
+ return envVars
}
func (e *External) ExecutablePath() (string, error) {
@@ -68,10 +90,10 @@ func (e *External) Validate() error {
execPath, err := e.ExecutablePath()
if err != nil {
- return errors.Wrap(err, "fetching executable path")
+ return fmt.Errorf("failed to get executable path: %w", err)
}
if _, err := os.Stat(execPath); err != nil {
- return errors.Wrap(err, "checking provider executable")
+ return fmt.Errorf("failed to access external provider binary %s", execPath)
}
if !exec.IsExecutable(execPath) {
return fmt.Errorf("external provider binary %s is not executable", execPath)
diff --git a/config/external_test.go b/config/external_test.go
index 1da36d33..68ca3636 100644
--- a/config/external_test.go
+++ b/config/external_test.go
@@ -18,6 +18,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "slices"
"testing"
"github.com/stretchr/testify/require"
@@ -30,7 +31,8 @@ func getDefaultExternalConfig(t *testing.T) External {
}
t.Cleanup(func() { os.RemoveAll(dir) })
- err = os.WriteFile(filepath.Join(dir, "garm-external-provider"), []byte{}, 0755)
+	//nolint:gosec
+ err = os.WriteFile(filepath.Join(dir, "garm-external-provider"), []byte{}, 0o755)
if err != nil {
t.Fatalf("failed to write file: %s", err)
}
@@ -76,7 +78,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderDir: "../test",
},
- errString: "fetching executable path: executable path must be an absolute path",
+ errString: "failed to get executable path: executable path must be an absolute path",
},
{
name: "Provider executable path must be absolute",
@@ -84,7 +86,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderExecutable: "../test",
},
- errString: "fetching executable path: executable path must be an absolute path",
+ errString: "failed to get executable path: executable path must be an absolute path",
},
{
name: "Provider executable not found",
@@ -92,7 +94,7 @@ func TestExternal(t *testing.T) {
ConfigFile: "",
ProviderDir: "/tmp",
},
- errString: "checking provider executable: stat /tmp/garm-external-provider: no such file or directory",
+ errString: "failed to access external provider binary /tmp/garm-external-provider",
},
}
@@ -121,3 +123,101 @@ func TestProviderExecutableIsExecutable(t *testing.T) {
require.NotNil(t, err)
require.EqualError(t, err, fmt.Sprintf("external provider binary %s is not executable", execPath))
}
+
+func TestExternalEnvironmentVariables(t *testing.T) {
+ cfg := getDefaultExternalConfig(t)
+
+ tests := []struct {
+ name string
+ cfg External
+ expectedEnvironmentVariables []string
+ environmentVariables map[string]string
+ }{
+ {
+ name: "Provider with no additional environment variables",
+ cfg: cfg,
+ expectedEnvironmentVariables: []string{},
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ },
+ },
+ {
+ name: "Provider with additional environment variables",
+ cfg: External{
+ ConfigFile: "",
+ ProviderDir: "../test",
+ EnvironmentVariables: []string{
+ "PROVIDER_",
+ "INFRA_REGION",
+ },
+ },
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ "GARM_POOL_ID": "f3b21376-e189-43ae-a1bd-7a3ffee57a58",
+ },
+ expectedEnvironmentVariables: []string{
+ "PROVIDER_LOG_LEVEL=debug",
+ "PROVIDER_TIMEOUT=30",
+ "PROVIDER_RETRY_COUNT=3",
+ "INFRA_REGION=us-east-1",
+ },
+ },
+ {
+ name: "GARM variables are getting ignored",
+ cfg: External{
+ ConfigFile: "",
+ ProviderDir: "../test",
+ EnvironmentVariables: []string{
+ "PROVIDER_",
+ "INFRA_REGION",
+ "GARM_SERVER",
+ },
+ },
+ environmentVariables: map[string]string{
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "PROVIDER_LOG_LEVEL": "debug",
+ "PROVIDER_TIMEOUT": "30",
+ "PROVIDER_RETRY_COUNT": "3",
+ "INFRA_REGION": "us-east-1",
+ "GARM_POOL_ID": "f3b21376-e189-43ae-a1bd-7a3ffee57a58",
+ "GARM_SERVER_SHUTDOWN": "true",
+ "GARM_SERVER_INSECURE": "true",
+ },
+ expectedEnvironmentVariables: []string{
+ "PROVIDER_LOG_LEVEL=debug",
+ "PROVIDER_TIMEOUT=30",
+ "PROVIDER_RETRY_COUNT=3",
+ "INFRA_REGION=us-east-1",
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ // set environment variables
+ for k, v := range tc.environmentVariables {
+ err := os.Setenv(k, v)
+ if err != nil {
+ t.Fatalf("failed to set environment variable: %s", err)
+ }
+ }
+
+ envVars := tc.cfg.GetEnvironmentVariables()
+
+ // sort slices to make them comparable
+ slices.Sort(envVars)
+ slices.Sort(tc.expectedEnvironmentVariables)
+
+ // compare slices
+ require.Equal(t, tc.expectedEnvironmentVariables, envVars)
+ })
+ }
+}
diff --git a/config/lxd.go b/config/lxd.go
deleted file mode 100644
index 8b8b1f7e..00000000
--- a/config/lxd.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package config
-
-import (
- "fmt"
- "net/url"
- "os"
-
- "github.com/pkg/errors"
-)
-
-type LXDRemoteProtocol string
-type LXDImageType string
-
-func (l LXDImageType) String() string {
- return string(l)
-}
-
-const (
- SimpleStreams LXDRemoteProtocol = "simplestreams"
- LXDImageVirtualMachine LXDImageType = "virtual-machine"
- LXDImageContainer LXDImageType = "container"
-)
-
-// LXDImageRemote holds information about a remote server from which LXD can fetch
-// OS images. Typically this will be a simplestreams server.
-type LXDImageRemote struct {
- Address string `toml:"addr" json:"addr"`
- Public bool `toml:"public" json:"public"`
- Protocol LXDRemoteProtocol `toml:"protocol" json:"protocol"`
- InsecureSkipVerify bool `toml:"skip_verify" json:"skip-verify"`
-}
-
-func (l *LXDImageRemote) Validate() error {
- if l.Protocol != SimpleStreams {
- // Only supports simplestreams for now.
- return fmt.Errorf("invalid remote protocol %s. Supported protocols: %s", l.Protocol, SimpleStreams)
- }
- if l.Address == "" {
- return fmt.Errorf("missing address")
- }
-
- url, err := url.ParseRequestURI(l.Address)
- if err != nil {
- return errors.Wrap(err, "validating address")
- }
-
- if url.Scheme != "http" && url.Scheme != "https" {
- return fmt.Errorf("address must be http or https")
- }
-
- return nil
-}
-
-// LXD holds connection information for an LXD cluster.
-type LXD struct {
- // UnixSocket is the path on disk to the LXD unix socket. If defined,
- // this is prefered over connecting via HTTPs.
- UnixSocket string `toml:"unix_socket_path" json:"unix-socket-path"`
-
- // Project name is the name of the project in which this runner will create
- // instances. If this option is not set, the default project will be used.
- // The project used here, must have all required profiles created by you
- // beforehand. For LXD, the "flavor" used in the runner definition for a pool
- // equates to a profile in the desired project.
- ProjectName string `toml:"project_name" json:"project-name"`
-
- // IncludeDefaultProfile specifies whether or not this provider will always add
- // the "default" profile to any newly created instance.
- IncludeDefaultProfile bool `toml:"include_default_profile" json:"include-default-profile"`
-
- // URL holds the URL of the remote LXD server.
- // example: https://10.10.10.1:8443/
- URL string `toml:"url" json:"url"`
- // ClientCertificate is the x509 client certificate path used for authentication.
- ClientCertificate string `toml:"client_certificate" json:"client_certificate"`
- // ClientKey is the key used for client certificate authentication.
- ClientKey string `toml:"client_key" json:"client-key"`
- // TLS certificate of the remote server. If not specified, the system CA is used.
- TLSServerCert string `toml:"tls_server_certificate" json:"tls-server-certificate"`
- // TLSCA is the TLS CA certificate when running LXD in PKI mode.
- TLSCA string `toml:"tls_ca" json:"tls-ca"`
-
- // ImageRemotes is a map to a set of remote image repositories we can use to
- // download images.
- ImageRemotes map[string]LXDImageRemote `toml:"image_remotes" json:"image-remotes"`
-
- // SecureBoot enables secure boot for VMs spun up using this provider.
- SecureBoot bool `toml:"secure_boot" json:"secure-boot"`
-
- // InstanceType allows you to choose between a virtual machine and a container
- InstanceType LXDImageType `toml:"instance_type" json:"instance-type"`
-}
-
-func (l *LXD) GetInstanceType() LXDImageType {
- switch l.InstanceType {
- case LXDImageVirtualMachine, LXDImageContainer:
- return l.InstanceType
- default:
- return LXDImageVirtualMachine
- }
-}
-
-func (l *LXD) Validate() error {
- if l.UnixSocket != "" {
- if _, err := os.Stat(l.UnixSocket); err != nil {
- return fmt.Errorf("could not access unix socket %s: %q", l.UnixSocket, err)
- }
-
- return nil
- }
-
- if l.URL == "" {
- return fmt.Errorf("unix_socket or address must be specified")
- }
-
- url, err := url.ParseRequestURI(l.URL)
- if err != nil {
- return fmt.Errorf("invalid LXD URL")
- }
-
- if url.Scheme != "https" {
- return fmt.Errorf("address must be https")
- }
-
- if l.ClientCertificate == "" || l.ClientKey == "" {
- return fmt.Errorf("client_certificate and client_key are mandatory")
- }
-
- if _, err := os.Stat(l.ClientCertificate); err != nil {
- return fmt.Errorf("failed to access client certificate %s: %q", l.ClientCertificate, err)
- }
-
- if _, err := os.Stat(l.ClientKey); err != nil {
- return fmt.Errorf("failed to access client key %s: %q", l.ClientKey, err)
- }
-
- if l.TLSServerCert != "" {
- if _, err := os.Stat(l.TLSServerCert); err != nil {
- return fmt.Errorf("failed to access tls_server_certificate %s: %q", l.TLSServerCert, err)
- }
- }
-
- for name, val := range l.ImageRemotes {
- if err := val.Validate(); err != nil {
- return fmt.Errorf("remote %s is invalid: %s", name, err)
- }
- }
- return nil
-}
diff --git a/config/lxd_test.go b/config/lxd_test.go
deleted file mode 100644
index 1bba515d..00000000
--- a/config/lxd_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package config
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func getDefaultLXDImageRemoteConfig() LXDImageRemote {
- return LXDImageRemote{
- Address: "https://cloud-images.ubuntu.com/releases",
- Public: true,
- Protocol: SimpleStreams,
- InsecureSkipVerify: false,
- }
-}
-
-func getDefaultLXDConfig() LXD {
- remote := getDefaultLXDImageRemoteConfig()
- return LXD{
- URL: "https://example.com:8443",
- ProjectName: "default",
- IncludeDefaultProfile: false,
- ClientCertificate: "../testdata/lxd/certs/client.crt",
- ClientKey: "../testdata/lxd/certs/client.key",
- TLSServerCert: "../testdata/lxd/certs/servercert.crt",
- ImageRemotes: map[string]LXDImageRemote{
- "default": remote,
- },
- SecureBoot: false,
- }
-}
-
-func TestLXDRemote(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- err := cfg.Validate()
- require.Nil(t, err)
-}
-
-func TestLXDRemoteEmptyAddress(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = ""
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "missing address")
-}
-
-func TestLXDRemoteInvalidAddress(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = "bogus address"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "validating address: parse \"bogus address\": invalid URI for request")
-}
-
-func TestLXDRemoteIvalidAddressScheme(t *testing.T) {
- cfg := getDefaultLXDImageRemoteConfig()
-
- cfg.Address = "ftp://whatever"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "address must be http or https")
-}
-
-func TestLXDConfig(t *testing.T) {
- cfg := getDefaultLXDConfig()
- err := cfg.Validate()
- require.Nil(t, err)
-}
-
-func TestLXDWithInvalidUnixSocket(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.UnixSocket = "bogus unix socket"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "could not access unix socket bogus unix socket: \"stat bogus unix socket: no such file or directory\"")
-}
-
-func TestMissingUnixSocketAndMissingURL(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.URL = ""
- cfg.UnixSocket = ""
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "unix_socket or address must be specified")
-}
-
-func TestInvalidLXDURL(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.URL = "bogus"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "invalid LXD URL")
-}
-
-func TestLXDURLIsHTTPS(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.URL = "http://example.com"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "address must be https")
-}
-
-func TestMissingClientCertOrKey(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.ClientKey = ""
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "client_certificate and client_key are mandatory")
-
- cfg = getDefaultLXDConfig()
- cfg.ClientCertificate = ""
- err = cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "client_certificate and client_key are mandatory")
-}
-
-func TestLXDIvalidCertOrKeyPaths(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.ClientCertificate = "/i/am/not/here"
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access client certificate /i/am/not/here: \"stat /i/am/not/here: no such file or directory\"")
-
- cfg.ClientCertificate = "../testdata/lxd/certs/client.crt"
- cfg.ClientKey = "/me/neither"
-
- err = cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access client key /me/neither: \"stat /me/neither: no such file or directory\"")
-}
-
-func TestLXDInvalidServerCertPath(t *testing.T) {
- cfg := getDefaultLXDConfig()
- cfg.TLSServerCert = "/not/a/valid/server/cert/path"
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "failed to access tls_server_certificate /not/a/valid/server/cert/path: \"stat /not/a/valid/server/cert/path: no such file or directory\"")
-}
-
-func TestInvalidLXDImageRemotes(t *testing.T) {
- cfg := getDefaultLXDConfig()
-
- cfg.ImageRemotes["default"] = LXDImageRemote{
- Protocol: LXDRemoteProtocol("bogus"),
- }
-
- err := cfg.Validate()
- require.NotNil(t, err)
- require.EqualError(t, err, "remote default is invalid: invalid remote protocol bogus. Supported protocols: simplestreams")
-}
diff --git a/contrib/garm.service b/contrib/garm.service
index d0ead1f5..5a4e6082 100644
--- a/contrib/garm.service
+++ b/contrib/garm.service
@@ -5,6 +5,7 @@ After=multi-user.target
[Service]
Type=simple
ExecStart=/usr/local/bin/garm -config /etc/garm/config.toml
+ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=5s
User=garm
diff --git a/contrib/providers.d/azure/cloudconfig/install_runner.tpl b/contrib/providers.d/azure/cloudconfig/install_runner.tpl
deleted file mode 100644
index 910d8eac..00000000
--- a/contrib/providers.d/azure/cloudconfig/install_runner.tpl
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-METADATA_URL="GARM_METADATA_URL"
-CALLBACK_URL="GARM_CALLBACK_URL"
-BEARER_TOKEN="GARM_CALLBACK_TOKEN"
-DOWNLOAD_URL="GH_DOWNLOAD_URL"
-DOWNLOAD_TOKEN="GH_TEMP_DOWNLOAD_TOKEN"
-FILENAME="GH_FILENAME"
-TARGET_URL="GH_TARGET_URL"
-RUNNER_NAME="GH_RUNNER_NAME"
-RUNNER_LABELS="GH_RUNNER_LABELS"
-TEMP_TOKEN=""
-
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-
-function call() {
- PAYLOAD="$1"
- curl --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-if [ ! -z "$DOWNLOAD_TOKEN" ]; then
- TEMP_TOKEN="Authorization: Bearer $DOWNLOAD_TOKEN"
-fi
-
-sendStatus "downloading tools from ${DOWNLOAD_URL}"
-curl --fail -L -H "${TEMP_TOKEN}" -o "/home/runner/${FILENAME}" "${DOWNLOAD_URL}" || fail "failed to download tools"
-
-mkdir -p /home/runner/actions-runner || fail "failed to create actions-runner folder"
-
-sendStatus "extracting runner"
-tar xf "/home/runner/${FILENAME}" -C /home/runner/actions-runner/ || fail "failed to extract runner"
-chown runner:runner -R /home/runner/actions-runner/ || fail "failed to change owner"
-
-sendStatus "installing dependencies"
-cd /home/runner/actions-runner
-sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-
-sendStatus "fetching runner registration token"
-GITHUB_TOKEN=$(curl --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}" || fail "failed to get runner registration token")
-
-sendStatus "configuring runner"
-sudo -u runner -- ./config.sh --unattended --url "${TARGET_URL}" --token "${GITHUB_TOKEN}" --name "${RUNNER_NAME}" --labels "${RUNNER_LABELS}" --ephemeral || fail "failed to configure runner"
-
-sendStatus "installing runner service"
-./svc.sh install runner || fail "failed to install service"
-
-sendStatus "starting service"
-./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/runner/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
\ No newline at end of file
diff --git a/contrib/providers.d/azure/cloudconfig/userdata.tpl b/contrib/providers.d/azure/cloudconfig/userdata.tpl
deleted file mode 100644
index 10ef2b51..00000000
--- a/contrib/providers.d/azure/cloudconfig/userdata.tpl
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-package_upgrade: true
-packages:
- - curl
- - tar
-system_info:
- default_user:
- name: runner
- home: /home/runner
- shell: /bin/bash
- groups:
- - sudo
- - adm
- - cdrom
- - dialout
- - dip
- - video
- - plugdev
- - netdev
- - docker
- - lxd
- sudo: ALL=(ALL) NOPASSWD:ALL
-runcmd:
- - /install_runner.sh
- - rm -f /install_runner.sh
-write_files:
- - encoding: b64
- content: RUNNER_INSTALL_B64
- owner: root:root
- path: /install_runner.sh
- permissions: "755"
diff --git a/contrib/providers.d/azure/config.sh b/contrib/providers.d/azure/config.sh
deleted file mode 100644
index f99f42ac..00000000
--- a/contrib/providers.d/azure/config.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# Azure service principal credentials
-export AZURE_SUBSCRIPTION_ID=""
-export AZURE_TENANT_ID=""
-export AZURE_CLIENT_ID=""
-export AZURE_CLIENT_SECRET=""
-
-# GARM config
-export LOCATION="westeurope"
diff --git a/contrib/providers.d/azure/garm-external-provider b/contrib/providers.d/azure/garm-external-provider
deleted file mode 100755
index 7974f40f..00000000
--- a/contrib/providers.d/azure/garm-external-provider
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-if [ ! -t 0 ]
-then
- INPUT=$(cat -)
-fi
-MYPATH=$(realpath ${BASH_SOURCE[0]})
-MYDIR=$(dirname "${MYPATH}")
-TEMPLATES="$MYDIR/cloudconfig"
-
-# Defaults
-LOCATION=${LOCATION:"westeurope"}
-
-# END Defaults
-
-if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
-then
- echo "no config file specified in env"
- exit 1
-fi
-
-source "$GARM_PROVIDER_CONFIG_FILE"
-
-declare -A GARM_TO_GH_ARCH_MAP
-GARM_TO_GH_ARCH_MAP["amd64"]="x64"
-GARM_TO_GH_ARCH_MAP["arm"]="arm"
-GARM_TO_GH_ARCH_MAP["arm64"]="arm64"
-
-declare -A AZURE_OS_TO_GH_OS_MAP
-AZURE_OS_TO_GH_OS_MAP["Linux"]="linux"
-AZURE_OS_TO_GH_OS_MAP["Windows"]="win"
-
-# https://docs.microsoft.com/en-us/azure/virtual-machines/states-billing#power-states-and-billing
-declare -A AZURE_POWER_STATE_MAP
-AZURE_POWER_STATE_MAP["VM starting"]="pending_create"
-AZURE_POWER_STATE_MAP["VM running"]="running"
-AZURE_POWER_STATE_MAP["VM stopping"]="stopped"
-AZURE_POWER_STATE_MAP["VM stopped"]="stopped"
-AZURE_POWER_STATE_MAP["VM deallocating"]="stopped"
-AZURE_POWER_STATE_MAP["VM deallocated"]="stopped"
-
-# https://docs.microsoft.com/en-us/azure/virtual-machines/states-billing#provisioning-states
-declare -A AZURE_PROVISION_STATE_MAP
-AZURE_PROVISION_STATE_MAP["Creating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Updating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Migrating"]="pending_create"
-AZURE_PROVISION_STATE_MAP["Failed"]="error"
-AZURE_PROVISION_STATE_MAP["Succeeded"]="running"
-AZURE_PROVISION_STATE_MAP["Deleting"]="pending_delete"
-
-function checkValNotNull() {
- if [ -z "$1" -o "$1" == "null" ]; then
- echo "failed to fetch value $2"
- return 1
- fi
- return 0
-}
-
-function requestedArch() {
- ARCH=$(echo "$INPUT" | jq -c -r '.arch')
- checkValNotNull "${ARCH}" "arch" || return $?
- echo "${ARCH}"
-}
-
-function downloadURL() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_OS="${AZURE_OS_TO_GH_OS_MAP[$1]}"
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- URL=$(echo "$INPUT" | jq -c -r --arg OS "$GH_OS" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).download_url')
- checkValNotNull "${URL}" "download URL" || return $?
- echo "${URL}"
-}
-
-function tempDownloadToken() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- TOKEN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).temp_download_token')
- echo "${TOKEN}"
-}
-
-function runnerTokenURL() {
- METADATA_URL=$(echo "$INPUT" | jq -c -r '."metadata-url"')
- checkValNotNull "${METADATA_URL}" "metadata-url" || return $?
- echo "${METADATA_URL}/runner-registration-token/"
-}
-
-function downloadFilename() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_OS="${AZURE_OS_TO_GH_OS_MAP[$1]}"
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- FN=$(echo "$INPUT" | jq -c -r --arg OS "$GH_OS" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).filename')
- checkValNotNull "${FN}" "download filename" || return $?
- echo "${FN}"
-}
-
-function poolID() {
- POOL_ID=$(echo "$INPUT" | jq -c -r '.pool_id')
- checkValNotNull "${POOL_ID}" "pool_id" || return $?
- echo "${POOL_ID}"
-}
-
-function vmSize() {
- VM_SIZE=$(echo "$INPUT" | jq -c -r '.flavor')
- checkValNotNull "${VM_SIZE}" "flavor" || return $?
- echo "${VM_SIZE}"
-}
-
-function imageUrn() {
- IMG=$(echo "$INPUT" | jq -c -r '.image')
- checkValNotNull "${IMG}" "image" || return $?
- echo "${IMG}"
-}
-
-function getOSImageDetails() {
- IMAGE=$(echo "$INPUT" | jq -r -c '.image')
- IMAGE_DETAILS=$(az vm image show --urn "$IMAGE" -o json --only-show-errors)
- echo "$IMAGE_DETAILS"
-}
-
-function repoURL() {
- REPO=$(echo "$INPUT" | jq -c -r '.repo_url')
- checkValNotNull "${REPO}" "repo_url" || return $?
- echo "${REPO}"
-}
-
-function callbackURL() {
- CB_URL=$(echo "$INPUT" | jq -c -r '."callback-url"')
- checkValNotNull "${CB_URL}" "callback-url" || return $?
- echo "${CB_URL}"
-}
-
-function callbackToken() {
- CB_TK=$(echo "$INPUT" | jq -c -r '."instance-token"')
- checkValNotNull "${CB_TK}" "instance-token" || return $?
- echo "${CB_TK}"
-}
-
-function instanceName() {
- NAME=$(echo "$INPUT" | jq -c -r '.name')
- checkValNotNull "${NAME}" "name" || return $?
- echo "${NAME}"
-}
-
-function labels() {
- LBL=$(echo "$INPUT" | jq -c -r '.labels | join(",")')
- checkValNotNull "${LBL}" "labels" || return $?
- echo "${LBL}"
-}
-
-function vmStatus() {
- [ -z "$1" -o -z "$2" ] && return 1
-
- RG_DETAILS=$(az group show -n "$1" -o json --only-show-errors)
- RG_STATE=$(echo "$RG_DETAILS" | jq -r '.properties.provisioningState')
- STATUS="${AZURE_PROVISION_STATE_MAP[$RG_STATE]}"
- if [[ "$STATUS" != "running" ]]; then
- echo "$STATUS"
- return 0
- fi
- VM_DETAILS=$(az vm show -g "$1" -n "$2" --show-details -o json --only-show-errors)
- VM_STATE=$(echo "$VM_DETAILS" | jq -r '.provisioningState')
- STATUS="${AZURE_PROVISION_STATE_MAP[$VM_STATE]}"
- if [[ "$STATUS" != "running" ]]; then
- echo "$STATUS"
- return 0
- fi
- VM_POWER_STATE=$(echo "$VM_DETAILS" | jq -r '.powerState')
- VM_STATUS="${AZURE_POWER_STATE_MAP[$VM_POWER_STATE]}"
- if [[ -z "${VM_STATUS}" ]]; then
- echo "unknown"
- return 0
- fi
- echo "${VM_STATUS}"
-}
-
-function getCloudConfig() {
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.osDiskImage.operatingSystem')
- checkValNotNull "${OS_TYPE}" "operatingSystem" || return $?
-
- ARCH=$(requestedArch)
- DW_URL=$(downloadURL "${OS_TYPE}" "${ARCH}")
- DW_TOKEN=$(tempDownloadToken "${OS_TYPE}" "${ARCH}")
- DW_FILENAME=$(downloadFilename "${OS_TYPE}" "${ARCH}")
- LABELS=$(labels)
-
- TMP_SCRIPT=$(mktemp)
- TMP_CC=$(mktemp)
-
- INSTALL_TPL=$(cat "${TEMPLATES}/install_runner.tpl")
- CC_TPL=$(cat "${TEMPLATES}/userdata.tpl")
- echo "$INSTALL_TPL" | sed -e "s|GARM_CALLBACK_URL|$(callbackURL)|g" \
- -e "s|GARM_CALLBACK_TOKEN|$(callbackToken)|g" \
- -e "s|GH_DOWNLOAD_URL|${DW_URL}|g" \
- -e "s|GH_FILENAME|${DW_FILENAME}|g" \
- -e "s|GH_TARGET_URL|$(repoURL)|g" \
- -e "s|GARM_METADATA_URL|$(runnerTokenURL)|g" \
- -e "s|GH_RUNNER_NAME|$(instanceName)|g" \
- -e "s|GH_TEMP_DOWNLOAD_TOKEN|${DW_TOKEN}|g" \
- -e "s|GH_RUNNER_LABELS|${LABELS}|g" > ${TMP_SCRIPT}
-
- AS_B64=$(base64 -w0 ${TMP_SCRIPT})
- echo "${CC_TPL}" | sed "s|RUNNER_INSTALL_B64|${AS_B64}|g" > ${TMP_CC}
- echo "${TMP_CC}"
-}
-
-function CreateInstance() {
- if [ -z "$INPUT" ]; then
- echo "expected build params in stdin"
- exit 1
- fi
-
- CC_FILE=$(getCloudConfig)
- VM_SIZE=$(vmSize)
- INSTANCE_NAME=$(instanceName)
- IMAGE_URN=$(imageUrn)
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.osDiskImage.operatingSystem' | tr '[:upper:]' '[:lower:]')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
- OS_NAME=$(echo "${IMAGE_URN}" | cut -d ':' -f2)
- OS_VERSION=$(echo "${IMAGE_URN}" | cut -d ':' -f3)
- ARCH="amd64"
-
- TAGS="garm_controller_id=${GARM_CONTROLLER_ID} garm_pool_id=${GARM_POOL_ID} os_type=${OS_TYPE} os_name=${OS_NAME} os_version=${OS_VERSION} os_arch=${ARCH}"
-
- set +e
-
- az group create -n $INSTANCE_NAME -l $LOCATION --tags $TAGS --only-show-errors -o none
- az vm create -g $INSTANCE_NAME -n $INSTANCE_NAME -l $LOCATION --size $VM_SIZE --image $IMAGE_URN --tags $TAGS --nsg-rule none --public-ip-address "" --user-data "${CC_FILE}" -o none --only-show-errors
- if [[ $? -ne 0 ]]; then
- az group delete -n $INSTANCE_NAME --no-wait --y -o none --only-show-errors
- echo "Failed to create Azure VM"
- exit 1
- fi
- rm -f "${CC_FILE}"
-
- set -e
-
- STATUS=$(vmStatus $INSTANCE_NAME $INSTANCE_NAME)
- FAULT_VAL=""
-
- jq -rnc \
- --arg PROVIDER_ID "${INSTANCE_NAME}" \
- --arg NAME "${INSTANCE_NAME}" \
- --arg OS_TYPE "${OS_TYPE}" \
- --arg OS_NAME "${OS_NAME}" \
- --arg OS_VERSION "${OS_VERSION}" \
- --arg ARCH "${ARCH}" \
- --arg STATUS "${STATUS}" \
- --arg POOL_ID "${GARM_POOL_ID}" \
- --arg FAULT "${FAULT_VAL}" \
- '{"provider_id": $PROVIDER_ID, "name": $NAME, "os_type": $OS_TYPE, "os_name": $OS_NAME, "os_version": $OS_VERSION, "os_arch": $ARCH, "status": $STATUS, "pool_id": $POOL_ID, "provider_fault": $FAULT}'
-}
-
-function DeleteInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- set +e
- rg_info=$(az group show -n "${instance_id}" -o json --only-show-errors 2>&1)
- if [ $? -ne 0 ]; then
- CODE=$?
- set -e
- if echo "${rg_info}" | grep -q "ResourceGroupNotFound"; then
- return 0
- fi
- return $CODE
- fi
- set -e
- az group delete -n "${instance_id}" --no-wait --y --only-show-errors
-}
-
-function StartInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- az vm start -g "${instance_id}" -n "${instance_id}" -o none --only-show-errors
-}
-
-function StopServer() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ]; then
- echo "missing instance ID in env"
- return 1
- fi
-
- az vm deallocate -g "${instance_id}" -n "${instance_id}" -o none --only-show-errors
-}
-
-function GetInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- info=$(az vm show -d -n $instance_id -g $instance_id -o json --only-show-errors 2>&1)
- echo $info | jq -r '
- {
- provider_id: .name,
- name: .name,
- os_type: .tags.os_type,
- os_name: .tags.os_name,
- os_version: .tags.os_version,
- os_arch: .tags.os_arch,
- pool_id: .tags.garm_pool_id,
- status: {"VM starting": "pending_create", "VM running": "running", "VM stopping": "stopped", "VM stopped": "stopped", "VM deallocating": "stopped", "VM deallocated": "stopped"}[.powerState]
- }'
-}
-
-function ListInstances() {
- INSTANCES=$(az vm list --query "[?tags.garm_pool_id == '${GARM_POOL_ID}']" -o json --only-show-errors 2>&1)
- echo $info | jq -r '[
- .[] | {
- provider_id: .name,
- name: .name,
- os_type: .tags.os_type,
- os_name: .tags.os_name,
- os_version: .tags.os_version,
- os_arch: .tags.os_arch,
- pool_id: .tags.garm_pool_id,
- status: {"Creating": "pending_create", "Migrating": "pending_create", "Failed": "error", "Succeeded": "running", "Deleting": "pending_delete"}[.provisioningState]
- }]'
-}
-
-# Login to Azure
-checkValNotNull "${AZURE_SUBSCRIPTION_ID}" "AZURE_SUBSCRIPTION_ID"
-checkValNotNull "${AZURE_TENANT_ID}" "AZURE_TENANT_ID"
-checkValNotNull "${AZURE_CLIENT_ID}" "AZURE_CLIENT_ID"
-checkValNotNull "${AZURE_CLIENT_SECRET}" "AZURE_CLIENT_SECRET"
-
-export AZURE_CONFIG_DIR="${MYDIR}/.azure"
-
-az login --service-principal -u $AZURE_CLIENT_ID -p $AZURE_CLIENT_SECRET --tenant $AZURE_TENANT_ID -o none --only-show-errors
-az account set -s $AZURE_SUBSCRIPTION_ID -o none --only-show-errors
-
-case "$GARM_COMMAND" in
- "CreateInstance")
- CreateInstance
- ;;
- "DeleteInstance")
- DeleteInstance
- ;;
- "GetInstance")
- GetInstance
- ;;
- "ListInstances")
- ListInstances
- ;;
- "StartInstance")
- StartInstance
- ;;
- "StopInstance")
- StopServer
- ;;
- "RemoveAllInstances")
- echo "RemoveAllInstances not implemented"
- exit 1
- ;;
- *)
- echo "Invalid GARM provider command: \"$GARM_COMMAND\""
- exit 1
- ;;
-esac
diff --git a/contrib/providers.d/openstack/README.md b/contrib/providers.d/openstack/README.md
deleted file mode 100644
index 4995e543..00000000
--- a/contrib/providers.d/openstack/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# OpenStack external provider for garm
-
-This is an example external provider, written for OpenStack. It is a simple bash script that implements the external provider interface, in order to supply ```garm``` with compute instances. This is just an example, complete with a sample config file.
-
-Not all functions are implemented, just the bare minimum to get it to work with the current feature set of ```garm```. It is not meant for production, as it needs a lot more error checking, retries, and potentially more flexibility to be of any use in a real environment.
-
-Images that are used with garm require the following properties set on the image:
-
- * os_type (one of: windows, linux)
- * os_distro
- * os_version
- * architecture (one of: x86_64, armv7l, mips64, mips64el, mips, mipsel)
diff --git a/contrib/providers.d/openstack/cloudconfig/install_runner.tpl b/contrib/providers.d/openstack/cloudconfig/install_runner.tpl
deleted file mode 100644
index 910d8eac..00000000
--- a/contrib/providers.d/openstack/cloudconfig/install_runner.tpl
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-METADATA_URL="GARM_METADATA_URL"
-CALLBACK_URL="GARM_CALLBACK_URL"
-BEARER_TOKEN="GARM_CALLBACK_TOKEN"
-DOWNLOAD_URL="GH_DOWNLOAD_URL"
-DOWNLOAD_TOKEN="GH_TEMP_DOWNLOAD_TOKEN"
-FILENAME="GH_FILENAME"
-TARGET_URL="GH_TARGET_URL"
-RUNNER_NAME="GH_RUNNER_NAME"
-RUNNER_LABELS="GH_RUNNER_LABELS"
-TEMP_TOKEN=""
-
-
-if [ -z "$METADATA_URL" ];then
- echo "no token is available and METADATA_URL is not set"
- exit 1
-fi
-
-function call() {
- PAYLOAD="$1"
- curl --fail -s -X POST -d "${PAYLOAD}" -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${CALLBACK_URL}" || echo "failed to call home: exit code ($?)"
-}
-
-function sendStatus() {
- MSG="$1"
- call "{\"status\": \"installing\", \"message\": \"$MSG\"}"
-}
-
-function success() {
- MSG="$1"
- ID=$2
- call "{\"status\": \"idle\", \"message\": \"$MSG\", \"agent_id\": $ID}"
-}
-
-function fail() {
- MSG="$1"
- call "{\"status\": \"failed\", \"message\": \"$MSG\"}"
- exit 1
-}
-
-if [ ! -z "$DOWNLOAD_TOKEN" ]; then
- TEMP_TOKEN="Authorization: Bearer $DOWNLOAD_TOKEN"
-fi
-
-sendStatus "downloading tools from ${DOWNLOAD_URL}"
-curl --fail -L -H "${TEMP_TOKEN}" -o "/home/runner/${FILENAME}" "${DOWNLOAD_URL}" || fail "failed to download tools"
-
-mkdir -p /home/runner/actions-runner || fail "failed to create actions-runner folder"
-
-sendStatus "extracting runner"
-tar xf "/home/runner/${FILENAME}" -C /home/runner/actions-runner/ || fail "failed to extract runner"
-chown runner:runner -R /home/runner/actions-runner/ || fail "failed to change owner"
-
-sendStatus "installing dependencies"
-cd /home/runner/actions-runner
-sudo ./bin/installdependencies.sh || fail "failed to install dependencies"
-
-sendStatus "fetching runner registration token"
-GITHUB_TOKEN=$(curl --fail -s -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${BEARER_TOKEN}" "${METADATA_URL}" || fail "failed to get runner registration token")
-
-sendStatus "configuring runner"
-sudo -u runner -- ./config.sh --unattended --url "${TARGET_URL}" --token "${GITHUB_TOKEN}" --name "${RUNNER_NAME}" --labels "${RUNNER_LABELS}" --ephemeral || fail "failed to configure runner"
-
-sendStatus "installing runner service"
-./svc.sh install runner || fail "failed to install service"
-
-sendStatus "starting service"
-./svc.sh start || fail "failed to start service"
-
-set +e
-AGENT_ID=$(grep "agentId" /home/runner/actions-runner/.runner | tr -d -c 0-9)
-if [ $? -ne 0 ];then
- fail "failed to get agent ID"
-fi
-set -e
-
-success "runner successfully installed" $AGENT_ID
\ No newline at end of file
diff --git a/contrib/providers.d/openstack/cloudconfig/userdata.tpl b/contrib/providers.d/openstack/cloudconfig/userdata.tpl
deleted file mode 100644
index 10ef2b51..00000000
--- a/contrib/providers.d/openstack/cloudconfig/userdata.tpl
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-package_upgrade: true
-packages:
- - curl
- - tar
-system_info:
- default_user:
- name: runner
- home: /home/runner
- shell: /bin/bash
- groups:
- - sudo
- - adm
- - cdrom
- - dialout
- - dip
- - video
- - plugdev
- - netdev
- - docker
- - lxd
- sudo: ALL=(ALL) NOPASSWD:ALL
-runcmd:
- - /install_runner.sh
- - rm -f /install_runner.sh
-write_files:
- - encoding: b64
- content: RUNNER_INSTALL_B64
- owner: root:root
- path: /install_runner.sh
- permissions: "755"
diff --git a/contrib/providers.d/openstack/garm-external-provider b/contrib/providers.d/openstack/garm-external-provider
deleted file mode 100755
index f2602f57..00000000
--- a/contrib/providers.d/openstack/garm-external-provider
+++ /dev/null
@@ -1,445 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-if [ ! -t 0 ]
-then
- INPUT=$(cat -)
-fi
-MYPATH=$(realpath ${BASH_SOURCE[0]})
-MYDIR=$(dirname "${MYPATH}")
-TEMPLATES="$MYDIR/cloudconfig"
-
-# Defaults
-# set this variable to 0 in the provider config to disable.
-BOOT_FROM_VOLUME=${BOOT_FROM_VOLUME:-1}
-
-# END Defaults
-
-if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
-then
- echo "no config file specified in env"
- exit 1
-fi
-
-source "$GARM_PROVIDER_CONFIG_FILE"
-
-declare -A OS_TO_GH_ARCH_MAP
-OS_TO_GH_ARCH_MAP["x86_64"]="x64"
-OS_TO_GH_ARCH_MAP["armv7l"]="arm64"
-OS_TO_GH_ARCH_MAP["mips64"]="arm64"
-OS_TO_GH_ARCH_MAP["mips64el"]="arm64"
-OS_TO_GH_ARCH_MAP["mips"]="arm"
-OS_TO_GH_ARCH_MAP["mipsel"]="arm"
-
-declare -A OS_TO_GARM_ARCH_MAP
-OS_TO_GARM_ARCH_MAP["x86_64"]="amd64"
-OS_TO_GARM_ARCH_MAP["armv7l"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips64"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips64el"]="arm64"
-OS_TO_GARM_ARCH_MAP["mips"]="arm"
-OS_TO_GARM_ARCH_MAP["mipsel"]="arm"
-
-declare -A GARM_TO_GH_ARCH_MAP
-GARM_TO_GH_ARCH_MAP["amd64"]="x64"
-GARM_TO_GH_ARCH_MAP["arm"]="arm"
-GARM_TO_GH_ARCH_MAP["arm64"]="arm64"
-
-declare -A STATUS_MAP
-STATUS_MAP["ACTIVE"]="running"
-STATUS_MAP["SHUTOFF"]="stopped"
-STATUS_MAP["BUILD"]="pending_create"
-STATUS_MAP["ERROR"]="error"
-STATUS_MAP["DELETING"]="pending_delete"
-
-function checkValNotNull() {
- if [ -z "$1" -o "$1" == "null" ];then
- echo "failed to fetch value $2"
- return 1
- fi
- return 0
-}
-
-function getOSImageDetails() {
- IMAGE_ID=$(echo "$INPUT" | jq -r -c '.image')
- OS_IMAGE=$(openstack image show "$IMAGE_ID" -f json)
- echo "$OS_IMAGE"
-}
-
-function getOpenStackNetworkID() {
- if [ -z "$OPENSTACK_PRIVATE_NETWORK" ]
- then
- echo "no network specified in config"
- return 1
- fi
-
- NET_ID=$(openstack network show ${OPENSTACK_PRIVATE_NETWORK} -f value -c id)
- if [ -z "$NET_ID" ];then
- echo "failed to find network $OPENSTACK_PRIVATE_NETWORK"
- fi
- echo ${NET_ID}
-}
-
-function getVolumeSizeFromFlavor() {
- local flavor="$1"
-
- FLAVOR_DETAILS=$(openstack flavor show "${flavor}" -f json)
- DISK_SIZE=$(echo "$FLAVOR_DETAILS" | jq -c -r '.disk')
- if [ -z "$DISK_SIZE" ];then
- echo "failed to get disk size from flavor"
- return 1
- fi
-
- echo ${DISK_SIZE}
-}
-
-function waitForVolume() {
- local volumeName=$1
- set +e
- status=$(openstack volume show "${volumeName}" -f json | jq -r -c '.status')
- if [ $? -ne 0 ];then
- CODE=$?
- set -e
- return $CODE
- fi
- set -e
- while [ "${status}" != "available" -a "${status}" != "error" ];do
- status=$(openstack volume show "${volumeName}" -f json | jq -r -c '.status')
- done
-}
-
-function createVolumeFromImage() {
- local image="$1"
- local disk_size="$2"
- local instance_name="$3"
- if [ -z ${image} -o -z ${disk_size} -o -z "${instance_name}" ];then
- echo "missing image, disk size or instance name in function call"
- return 1
- fi
- # Instance names contain a UUID. It should be safe to create a volume with the same name and
- # expect it to be unique.
- set +e
- VOLUME_INFO=$(openstack volume create -f json --image "${image}" --size "${disk_size}" "${instance_name}")
- if [ $? -ne 0 ]; then
- CODE=$?
- openstack volume delete "${instance_name}" || true
- set -e
- return $CODE
- fi
- waitForVolume "${instance_name}"
- echo "${VOLUME_INFO}"
-}
-
-function requestedArch() {
- ARCH=$(echo "$INPUT" | jq -c -r '.arch')
- checkValNotNull "${ARCH}" "arch" || return $?
- echo "${ARCH}"
-}
-
-function downloadURL() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- URL=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).download_url')
- checkValNotNull "${URL}" "download URL" || return $?
- echo "${URL}"
-}
-
-function tempDownloadToken() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- TOKEN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).temp_download_token')
- echo "${TOKEN}"
-}
-
-function runnerTokenURL() {
- METADATA_URL=$(echo "$INPUT" | jq -c -r '."metadata-url"')
- checkValNotNull "${METADATA_URL}" "metadata-url" || return $?
- echo "${METADATA_URL}/runner-registration-token/"
-}
-
-function downloadFilename() {
- [ -z "$1" -o -z "$2" ] && return 1
- GH_ARCH="${GARM_TO_GH_ARCH_MAP[$2]}"
- FN=$(echo "$INPUT" | jq -c -r --arg OS "$1" --arg ARCH "$GH_ARCH" '(.tools[] | select( .os == $OS and .architecture == $ARCH)).filename')
- checkValNotNull "${FN}" "download filename" || return $?
- echo "${FN}"
-}
-
-function poolID() {
- POOL_ID=$(echo "$INPUT" | jq -c -r '.pool_id')
- checkValNotNull "${POOL_ID}" "pool_id" || return $?
- echo "${POOL_ID}"
-}
-
-function flavor() {
- FLAVOR=$(echo "$INPUT" | jq -c -r '.flavor')
- checkValNotNull "${FLAVOR}" "flavor" || return $?
- echo "${FLAVOR}"
-}
-
-function image() {
- IMG=$(echo "$INPUT" | jq -c -r '.image')
- checkValNotNull "${IMG}" "image" || return $?
- echo "${IMG}"
-}
-
-function repoURL() {
- REPO=$(echo "$INPUT" | jq -c -r '.repo_url')
- checkValNotNull "${REPO}" "repo_url" || return $?
- echo "${REPO}"
-}
-
-function callbackURL() {
- CB_URL=$(echo "$INPUT" | jq -c -r '."callback-url"')
- checkValNotNull "${CB_URL}" "callback-url" || return $?
- echo "${CB_URL}"
-}
-
-function callbackToken() {
- CB_TK=$(echo "$INPUT" | jq -c -r '."instance-token"')
- checkValNotNull "${CB_TK}" "instance-token" || return $?
- echo "${CB_TK}"
-}
-
-function instanceName() {
- NAME=$(echo "$INPUT" | jq -c -r '.name')
- checkValNotNull "${NAME}" "name" || return $?
- echo "${NAME}"
-}
-
-function labels() {
- LBL=$(echo "$INPUT" | jq -c -r '.labels | join(",")')
- checkValNotNull "${LBL}" "labels" || return $?
- echo "${LBL}"
-}
-
-function getCloudConfig() {
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_type')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
-
- ARCH=$(requestedArch)
- DW_URL=$(downloadURL "${OS_TYPE}" "${ARCH}")
- DW_TOKEN=$(tempDownloadToken "${OS_TYPE}" "${ARCH}")
- DW_FILENAME=$(downloadFilename "${OS_TYPE}" "${ARCH}")
- LABELS=$(labels)
-
- TMP_SCRIPT=$(mktemp)
- TMP_CC=$(mktemp)
-
- INSTALL_TPL=$(cat "${TEMPLATES}/install_runner.tpl")
- CC_TPL=$(cat "${TEMPLATES}/userdata.tpl")
- echo "$INSTALL_TPL" | sed -e "s|GARM_CALLBACK_URL|$(callbackURL)|g" \
- -e "s|GARM_CALLBACK_TOKEN|$(callbackToken)|g" \
- -e "s|GH_DOWNLOAD_URL|${DW_URL}|g" \
- -e "s|GH_FILENAME|${DW_FILENAME}|g" \
- -e "s|GH_TARGET_URL|$(repoURL)|g" \
- -e "s|GARM_METADATA_URL|$(runnerTokenURL)|g" \
- -e "s|GH_RUNNER_NAME|$(instanceName)|g" \
- -e "s|GH_TEMP_DOWNLOAD_TOKEN|${DW_TOKEN}|g" \
- -e "s|GH_RUNNER_LABELS|${LABELS}|g" > ${TMP_SCRIPT}
-
- AS_B64=$(base64 -w0 ${TMP_SCRIPT})
- echo "${CC_TPL}" | sed "s|RUNNER_INSTALL_B64|${AS_B64}|g" > ${TMP_CC}
- echo "${TMP_CC}"
-}
-
-function waitForServer() {
- local srv_id="$1"
-
- srv_info=$(openstack server show -f json "${srv_id}")
- [ $? -ne 0 ] && return $?
-
- status=$(echo "${srv_info}" | jq -r -c '.status')
-
- while [ "${status}" != "ERROR" -a "${status}" != "ACTIVE" ];do
- sleep 0.5
- srv_info=$(openstack server show -f json "${srv_id}")
- [ $? -ne 0 ] && return $?
- status=$(echo "${srv_info}" | jq -r -c '.status')
- done
- echo "${srv_info}"
-}
-
-function CreateInstance() {
- if [ -z "$INPUT" ];then
- echo "expected build params in stdin"
- exit 1
- fi
-
- CC_FILE=$(getCloudConfig)
- FLAVOR=$(flavor)
- IMAGE=$(image)
- INSTANCE_NAME=$(instanceName)
- NET=$(getOpenStackNetworkID)
- IMAGE_DETAILS=$(getOSImageDetails)
-
- OS_TYPE=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_type')
- checkValNotNull "${OS_TYPE}" "os_type" || return $?
- DISTRO=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_distro')
- checkValNotNull "${DISTRO}" "os_distro" || return $?
- VERSION=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.os_version')
- checkValNotNull "${VERSION}" "os_version" || return $?
- ARCH=$(echo "${IMAGE_DETAILS}" | jq -c -r '.properties.architecture')
- checkValNotNull "${ARCH}" "architecture" || return $?
- GH_ARCH=${OS_TO_GH_ARCH_MAP[${ARCH}]}
-
- if [ -z "${GH_ARCH}" ];then
- GH_ARCH=${ARCH}
- fi
-
- SOURCE_ARGS=""
-
- if [ "${BOOT_FROM_VOLUME}" -eq 1 ];then
- VOL_SIZE=$(getVolumeSizeFromFlavor "${FLAVOR}")
- VOL_INFO=$(createVolumeFromImage "${IMAGE}" "${VOL_SIZE}" "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- openstack volume delete "${INSTANCE_NAME}" || true
- fi
- SOURCE_ARGS="--volume ${INSTANCE_NAME}"
- else
- SOURCE_ARGS="--image ${IMAGE}"
- fi
-
- set +e
-
- TAGS="--tag garm-controller-id=${GARM_CONTROLLER_ID} --tag garm-pool-id=${GARM_POOL_ID}"
- PROPERTIES="--property os_type=${OS_TYPE} --property os_name=${DISTRO} --property os_version=${VERSION} --property os_arch=${GH_ARCH} --property pool_id=${GARM_POOL_ID}"
- SRV_DETAILS=$(openstack server create --os-compute-api-version 2.52 ${SOURCE_ARGS} ${TAGS} ${PROPERTIES} --flavor "${FLAVOR}" --user-data="${CC_FILE}" --network="${NET}" "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- openstack volume delete "${INSTANCE_NAME}" || true
- exit 1
- fi
- SRV_DETAILS=$(waitForServer "${INSTANCE_NAME}")
- if [ $? -ne 0 ];then
- CODE=$?
- # cleanup
- rm -f "${CC_FILE}" || true
- openstack server delete "${INSTANCE_NAME}" || true
- openstack volume delete "${INSTANCE_NAME}" || true
- set -e
- FAULT=$(echo "${SRV_DETAILS}"| jq -rc '.fault')
- echo "Failed to create server: ${FAULT}"
- exit $CODE
- fi
- set -e
- rm -f "${CC_FILE}" || true
-
- SRV_ID=$(echo "${SRV_DETAILS}" | jq -r -c '.id')
- STATUS=$(echo "${SRV_DETAILS}" | jq -r -c '.status')
- FAULT=$(echo "${SRV_DETAILS}" | jq -r -c '.fault')
- FAULT_VAL=""
- if [ ! -z "${FAULT}" -a "${FAULT}" != "null" ];then
- FAULT_VAL=$(echo "${FAULT}" | base64 -w0)
- fi
-
- jq -rnc \
- --arg PROVIDER_ID ${SRV_ID} \
- --arg NAME "${INSTANCE_NAME}" \
- --arg OS_TYPE "${OS_TYPE}" \
- --arg OS_NAME "${DISTRO}" \
- --arg OS_VERSION "${VERSION}" \
- --arg ARCH "${GH_ARCH}" \
- --arg STATUS "${STATUS_MAP[${STATUS}]}" \
- --arg POOL_ID "${GARM_POOL_ID}" \
- --arg FAULT "${FAULT_VAL}" \
- '{"provider_id": $PROVIDER_ID, "name": $NAME, "os_type": $OS_TYPE, "os_name": $OS_NAME, "os_version": $OS_VERSION, "os_arch": $ARCH, "status": $STATUS, "pool_id": $POOL_ID, "provider_fault": $FAULT}'
-}
-
-function DeleteInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- set +e
- instance_info=$(openstack server show "${instance_id}" -f json 2>&1)
- if [ $? -ne 0 ];then
- CODE=$?
- set -e
- if [ "${instance_info}" == "No server with a name or ID of*" ];then
- return 0
- fi
- return $CODE
- fi
- set -e
- VOLUMES=$(echo "${instance_info}" | jq -r -c '.volumes_attached[] | .id')
-
- openstack server delete "${instance_id}"
- for vol in "$VOLUMES";do
- waitForVolume "${vol}"
- openstack volume delete $vol || true
- done
-}
-
-function StartInstance() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- openstack server start "${instance_id}"
-}
-
-function StopServer() {
- local instance_id="${GARM_INSTANCE_ID}"
- if [ -z "${instance_id}" ];then
- echo "missing instance ID in env"
- return 1
- fi
-
- openstack server stop "${instance_id}"
-}
-
-function ListInstances() {
- INSTANCES=$(openstack server list --os-compute-api-version 2.52 --tags garm-pool-id=${GARM_POOL_ID} --long -f json)
- echo ${INSTANCES} | jq -r '[
- .[] | .Properties * {
- provider_id: .ID,
- name: .Name,
- status: {"ACTIVE": "running", "SHUTOFF": "stopped", "BUILD": "pending_create", "ERROR": "error", "DELETING": "pending_delete"}[.Status]
- }]'
-}
-
-function GetInstance() {
- INSTANCE=$(openstack server show --os-compute-api-version 2.52 ${GARM_INSTANCE_ID} -f json)
- echo ${INSTANCES} | jq -r '.properties * {
- provider_id: .id,
- name: .name,
- status: {"ACTIVE": "running", "SHUTOFF": "stopped", "BUILD": "pending_create", "ERROR": "error", "DELETING": "pending_delete"}[.status]
- }'
-}
-
-case "$GARM_COMMAND" in
- "CreateInstance")
- CreateInstance
- ;;
- "DeleteInstance")
- DeleteInstance
- ;;
- "GetInstance")
- GetInstance
- ;;
- "ListInstances")
- ListInstances
- ;;
- "StartInstance")
- StartInstance
- ;;
- "StopInstance")
- StopServer
- ;;
- "RemoveAllInstances")
- echo "RemoveAllInstances not implemented"
- exit 1
- ;;
- *)
- echo "Invalid GARM provider command: \"$GARM_COMMAND\""
- exit 1
- ;;
-esac
-
diff --git a/contrib/providers.d/openstack/keystonerc b/contrib/providers.d/openstack/keystonerc
deleted file mode 100644
index 1b702dd7..00000000
--- a/contrib/providers.d/openstack/keystonerc
+++ /dev/null
@@ -1,16 +0,0 @@
-# OpenStack client config
-export OS_REGION_NAME=RegionOne
-export OS_AUTH_VERSION=3
-export OS_AUTH_URL=http://10.0.8.36:5000/v3
-export OS_PROJECT_DOMAIN_NAME=admin_domain
-export OS_USERNAME=admin
-export OS_AUTH_TYPE=password
-export OS_USER_DOMAIN_NAME=admin_domain
-export OS_PROJECT_NAME=admin
-export OS_PASSWORD=Iegeehahth4suSie
-export OS_IDENTITY_API_VERSION=3
-
-
-# GARM config
-export OPENSTACK_PRIVATE_NETWORK="int_net"
-export BOOT_FROM_VOLUME=1
diff --git a/database/common/common.go b/database/common/common.go
deleted file mode 100644
index 897e9445..00000000
--- a/database/common/common.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package common
-
-import (
- "context"
-
- "github.com/cloudbase/garm/params"
-)
-
-type RepoStore interface {
- CreateRepository(ctx context.Context, owner, name, credentialsName, webhookSecret string) (params.Repository, error)
- GetRepository(ctx context.Context, owner, name string) (params.Repository, error)
- GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error)
- ListRepositories(ctx context.Context) ([]params.Repository, error)
- DeleteRepository(ctx context.Context, repoID string) error
- UpdateRepository(ctx context.Context, repoID string, param params.UpdateRepositoryParams) (params.Repository, error)
-
- CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error)
-
- GetRepositoryPool(ctx context.Context, repoID, poolID string) (params.Pool, error)
- DeleteRepositoryPool(ctx context.Context, repoID, poolID string) error
- UpdateRepositoryPool(ctx context.Context, repoID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
- FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error)
-
- ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error)
- ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error)
-}
-
-type OrgStore interface {
- CreateOrganization(ctx context.Context, name, credentialsName, webhookSecret string) (params.Organization, error)
- GetOrganization(ctx context.Context, name string) (params.Organization, error)
- GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error)
- ListOrganizations(ctx context.Context) ([]params.Organization, error)
- DeleteOrganization(ctx context.Context, orgID string) error
- UpdateOrganization(ctx context.Context, orgID string, param params.UpdateRepositoryParams) (params.Organization, error)
-
- CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error)
- GetOrganizationPool(ctx context.Context, orgID, poolID string) (params.Pool, error)
- DeleteOrganizationPool(ctx context.Context, orgID, poolID string) error
- UpdateOrganizationPool(ctx context.Context, orgID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
-
- FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error)
- ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error)
- ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error)
-}
-
-type EnterpriseStore interface {
- CreateEnterprise(ctx context.Context, name, credentialsName, webhookSecret string) (params.Enterprise, error)
- GetEnterprise(ctx context.Context, name string) (params.Enterprise, error)
- GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error)
- ListEnterprises(ctx context.Context) ([]params.Enterprise, error)
- DeleteEnterprise(ctx context.Context, enterpriseID string) error
- UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateRepositoryParams) (params.Enterprise, error)
-
- CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error)
- GetEnterprisePool(ctx context.Context, enterpriseID, poolID string) (params.Pool, error)
- DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID string) error
- UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID string, param params.UpdatePoolParams) (params.Pool, error)
-
- FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error)
- ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error)
- ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error)
-}
-
-type PoolStore interface {
- // Probably a bad idea without some king of filter or at least pagination
- // TODO: add filter/pagination
- ListAllPools(ctx context.Context) ([]params.Pool, error)
- GetPoolByID(ctx context.Context, poolID string) (params.Pool, error)
- DeletePoolByID(ctx context.Context, poolID string) error
-
- ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error)
-
- PoolInstanceCount(ctx context.Context, poolID string) (int64, error)
- GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error)
-}
-
-type UserStore interface {
- GetUser(ctx context.Context, user string) (params.User, error)
- GetUserByID(ctx context.Context, userID string) (params.User, error)
-
- CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error)
- UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error)
- HasAdminUser(ctx context.Context) bool
-}
-
-type InstanceStore interface {
- CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error)
- DeleteInstance(ctx context.Context, poolID string, instanceName string) error
- UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error)
-
- // Probably a bad idea without some king of filter or at least pagination
- // TODO: add filter/pagination
- ListAllInstances(ctx context.Context) ([]params.Instance, error)
-
- GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error)
- AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error
- ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error)
-}
-
-//go:generate mockery --name=Store
-type Store interface {
- RepoStore
- OrgStore
- EnterpriseStore
- PoolStore
- UserStore
- InstanceStore
-
- ControllerInfo() (params.ControllerInfo, error)
- InitController() (params.ControllerInfo, error)
-}
diff --git a/database/common/errors.go b/database/common/errors.go
new file mode 100644
index 00000000..5e6a5087
--- /dev/null
+++ b/database/common/errors.go
@@ -0,0 +1,29 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "fmt"
+
+var (
+ ErrProducerClosed = fmt.Errorf("producer is closed")
+ ErrProducerTimeoutErr = fmt.Errorf("producer timeout error")
+ ErrProducerAlreadyRegistered = fmt.Errorf("producer already registered")
+ ErrConsumerAlreadyRegistered = fmt.Errorf("consumer already registered")
+ ErrWatcherAlreadyStarted = fmt.Errorf("watcher already started")
+ ErrWatcherNotInitialized = fmt.Errorf("watcher not initialized")
+ ErrInvalidOperationType = fmt.Errorf("invalid operation")
+ ErrInvalidEntityType = fmt.Errorf("invalid entity type")
+ ErrNoFiltersProvided = fmt.Errorf("no filters provided")
+)
diff --git a/database/common/mocks/Store.go b/database/common/mocks/Store.go
index 71a2d038..024a1271 100644
--- a/database/common/mocks/Store.go
+++ b/database/common/mocks/Store.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -14,13 +14,25 @@ type Store struct {
mock.Mock
}
-// AddInstanceEvent provides a mock function with given fields: ctx, instanceID, event, eventLevel, eventMessage
-func (_m *Store) AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error {
- ret := _m.Called(ctx, instanceID, event, eventLevel, eventMessage)
+type Store_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Store) EXPECT() *Store_Expecter {
+ return &Store_Expecter{mock: &_m.Mock}
+}
+
+// AddEntityEvent provides a mock function with given fields: ctx, entity, event, eventLevel, statusMessage, maxEvents
+func (_m *Store) AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ ret := _m.Called(ctx, entity, event, eventLevel, statusMessage, maxEvents)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AddEntityEvent")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel, string) error); ok {
- r0 = rf(ctx, instanceID, event, eventLevel, eventMessage)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.EventType, params.EventLevel, string, int) error); ok {
+ r0 = rf(ctx, entity, event, eventLevel, statusMessage, maxEvents)
} else {
r0 = ret.Error(0)
}
@@ -28,10 +40,144 @@ func (_m *Store) AddInstanceEvent(ctx context.Context, instanceID string, event
return r0
}
-// ControllerInfo provides a mock function with given fields:
+// Store_AddEntityEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddEntityEvent'
+type Store_AddEntityEvent_Call struct {
+ *mock.Call
+}
+
+// AddEntityEvent is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - event params.EventType
+// - eventLevel params.EventLevel
+// - statusMessage string
+// - maxEvents int
+func (_e *Store_Expecter) AddEntityEvent(ctx interface{}, entity interface{}, event interface{}, eventLevel interface{}, statusMessage interface{}, maxEvents interface{}) *Store_AddEntityEvent_Call {
+ return &Store_AddEntityEvent_Call{Call: _e.mock.On("AddEntityEvent", ctx, entity, event, eventLevel, statusMessage, maxEvents)}
+}
+
+func (_c *Store_AddEntityEvent_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int)) *Store_AddEntityEvent_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.EventType), args[3].(params.EventLevel), args[4].(string), args[5].(int))
+ })
+ return _c
+}
+
+func (_c *Store_AddEntityEvent_Call) Return(_a0 error) *Store_AddEntityEvent_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_AddEntityEvent_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.EventType, params.EventLevel, string, int) error) *Store_AddEntityEvent_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// AddInstanceEvent provides a mock function with given fields: ctx, instanceNameOrID, event, eventLevel, eventMessage
+func (_m *Store) AddInstanceEvent(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error {
+ ret := _m.Called(ctx, instanceNameOrID, event, eventLevel, eventMessage)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AddInstanceEvent")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel, string) error); ok {
+ r0 = rf(ctx, instanceNameOrID, event, eventLevel, eventMessage)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_AddInstanceEvent_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddInstanceEvent'
+type Store_AddInstanceEvent_Call struct {
+ *mock.Call
+}
+
+// AddInstanceEvent is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+// - event params.EventType
+// - eventLevel params.EventLevel
+// - eventMessage string
+func (_e *Store_Expecter) AddInstanceEvent(ctx interface{}, instanceNameOrID interface{}, event interface{}, eventLevel interface{}, eventMessage interface{}) *Store_AddInstanceEvent_Call {
+ return &Store_AddInstanceEvent_Call{Call: _e.mock.On("AddInstanceEvent", ctx, instanceNameOrID, event, eventLevel, eventMessage)}
+}
+
+func (_c *Store_AddInstanceEvent_Call) Run(run func(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string)) *Store_AddInstanceEvent_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.EventType), args[3].(params.EventLevel), args[4].(string))
+ })
+ return _c
+}
+
+func (_c *Store_AddInstanceEvent_Call) Return(_a0 error) *Store_AddInstanceEvent_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_AddInstanceEvent_Call) RunAndReturn(run func(context.Context, string, params.EventType, params.EventLevel, string) error) *Store_AddInstanceEvent_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// BreakLockJobIsQueued provides a mock function with given fields: ctx, jobID
+func (_m *Store) BreakLockJobIsQueued(ctx context.Context, jobID int64) error {
+ ret := _m.Called(ctx, jobID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for BreakLockJobIsQueued")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, jobID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_BreakLockJobIsQueued_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BreakLockJobIsQueued'
+type Store_BreakLockJobIsQueued_Call struct {
+ *mock.Call
+}
+
+// BreakLockJobIsQueued is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) BreakLockJobIsQueued(ctx interface{}, jobID interface{}) *Store_BreakLockJobIsQueued_Call {
+ return &Store_BreakLockJobIsQueued_Call{Call: _e.mock.On("BreakLockJobIsQueued", ctx, jobID)}
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) Run(run func(ctx context.Context, jobID int64)) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) Return(_a0 error) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_BreakLockJobIsQueued_Call) RunAndReturn(run func(context.Context, int64) error) *Store_BreakLockJobIsQueued_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ControllerInfo provides a mock function with no fields
func (_m *Store) ControllerInfo() (params.ControllerInfo, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ControllerInfo")
+ }
+
var r0 params.ControllerInfo
var r1 error
if rf, ok := ret.Get(0).(func() (params.ControllerInfo, error)); ok {
@@ -52,23 +198,54 @@ func (_m *Store) ControllerInfo() (params.ControllerInfo, error) {
return r0, r1
}
-// CreateEnterprise provides a mock function with given fields: ctx, name, credentialsName, webhookSecret
-func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsName string, webhookSecret string) (params.Enterprise, error) {
- ret := _m.Called(ctx, name, credentialsName, webhookSecret)
+// Store_ControllerInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ControllerInfo'
+type Store_ControllerInfo_Call struct {
+ *mock.Call
+}
+
+// ControllerInfo is a helper method to define mock.On call
+func (_e *Store_Expecter) ControllerInfo() *Store_ControllerInfo_Call {
+ return &Store_ControllerInfo_Call{Call: _e.mock.On("ControllerInfo")}
+}
+
+func (_c *Store_ControllerInfo_Call) Run(run func()) *Store_ControllerInfo_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Store_ControllerInfo_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_ControllerInfo_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ControllerInfo_Call) RunAndReturn(run func() (params.ControllerInfo, error)) *Store_ControllerInfo_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEnterprise provides a mock function with given fields: ctx, name, credentialsName, webhookSecret, poolBalancerType
+func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsName params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Enterprise, error) {
+ ret := _m.Called(ctx, name, credentialsName, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEnterprise")
+ }
var r0 params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Enterprise, error)); ok {
- return rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Enterprise, error)); ok {
+ return rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Enterprise); ok {
- r0 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Enterprise); ok {
+ r0 = rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Enterprise)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
- r1 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, name, credentialsName, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -76,23 +253,59 @@ func (_m *Store) CreateEnterprise(ctx context.Context, name string, credentialsN
return r0, r1
}
-// CreateEnterprisePool provides a mock function with given fields: ctx, enterpriseID, param
-func (_m *Store) CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, param)
+// Store_CreateEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEnterprise'
+type Store_CreateEnterprise_Call struct {
+ *mock.Call
+}
+
+// CreateEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - credentialsName params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateEnterprise(ctx interface{}, name interface{}, credentialsName interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateEnterprise_Call {
+ return &Store_CreateEnterprise_Call{Call: _e.mock.On("CreateEnterprise", ctx, name, credentialsName, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateEnterprise_Call) Run(run func(ctx context.Context, name string, credentialsName params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.ForgeCredentials), args[3].(string), args[4].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_CreateEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateEnterprise_Call) RunAndReturn(run func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Enterprise, error)) *Store_CreateEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityPool provides a mock function with given fields: ctx, entity, param
+func (_m *Store) CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityPool")
+ }
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) (params.Pool, error)); ok {
+ return rf(ctx, entity, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) params.Pool); ok {
+ r0 = rf(ctx, entity, param)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, enterpriseID, param)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, params.CreatePoolParams) error); ok {
+ r1 = rf(ctx, entity, param)
} else {
r1 = ret.Error(1)
}
@@ -100,10 +313,330 @@ func (_m *Store) CreateEnterprisePool(ctx context.Context, enterpriseID string,
return r0, r1
}
+// Store_CreateEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityPool'
+type Store_CreateEntityPool_Call struct {
+ *mock.Call
+}
+
+// CreateEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - param params.CreatePoolParams
+func (_e *Store_Expecter) CreateEntityPool(ctx interface{}, entity interface{}, param interface{}) *Store_CreateEntityPool_Call {
+ return &Store_CreateEntityPool_Call{Call: _e.mock.On("CreateEntityPool", ctx, entity, param)}
+}
+
+func (_c *Store_CreateEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams)) *Store_CreateEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.CreatePoolParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_CreateEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.CreatePoolParams) (params.Pool, error)) *Store_CreateEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityScaleSet provides a mock function with given fields: _a0, entity, param
+func (_m *Store) CreateEntityScaleSet(_a0 context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityScaleSet")
+ }
+
+ var r0 params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) (params.ScaleSet, error)); ok {
+ return rf(_a0, entity, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) params.ScaleSet); ok {
+ r0 = rf(_a0, entity, param)
+ } else {
+ r0 = ret.Get(0).(params.ScaleSet)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) error); ok {
+ r1 = rf(_a0, entity, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateEntityScaleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityScaleSet'
+type Store_CreateEntityScaleSet_Call struct {
+ *mock.Call
+}
+
+// CreateEntityScaleSet is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+// - param params.CreateScaleSetParams
+func (_e *Store_Expecter) CreateEntityScaleSet(_a0 interface{}, entity interface{}, param interface{}) *Store_CreateEntityScaleSet_Call {
+ return &Store_CreateEntityScaleSet_Call{Call: _e.mock.On("CreateEntityScaleSet", _a0, entity, param)}
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams)) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(params.CreateScaleSetParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) Return(scaleSet params.ScaleSet, err error) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Return(scaleSet, err)
+ return _c
+}
+
+func (_c *Store_CreateEntityScaleSet_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, params.CreateScaleSetParams) (params.ScaleSet, error)) *Store_CreateEntityScaleSet_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGiteaCredentials provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGiteaCredentialsParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGiteaCredentials'
+type Store_CreateGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// CreateGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGiteaCredentialsParams
+func (_e *Store_Expecter) CreateGiteaCredentials(ctx interface{}, param interface{}) *Store_CreateGiteaCredentials_Call {
+ return &Store_CreateGiteaCredentials_Call{Call: _e.mock.On("CreateGiteaCredentials", ctx, param)}
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) Run(run func(ctx context.Context, param params.CreateGiteaCredentialsParams)) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGiteaCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) Return(gtCreds params.ForgeCredentials, err error) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Return(gtCreds, err)
+ return _c
+}
+
+func (_c *Store_CreateGiteaCredentials_Call) RunAndReturn(run func(context.Context, params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error)) *Store_CreateGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGiteaEndpoint provides a mock function with given fields: _a0, param
+func (_m *Store) CreateGiteaEndpoint(_a0 context.Context, param params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGiteaEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGiteaEndpointParams) error); ok {
+ r1 = rf(_a0, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGiteaEndpoint'
+type Store_CreateGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// CreateGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - param params.CreateGiteaEndpointParams
+func (_e *Store_Expecter) CreateGiteaEndpoint(_a0 interface{}, param interface{}) *Store_CreateGiteaEndpoint_Call {
+ return &Store_CreateGiteaEndpoint_Call{Call: _e.mock.On("CreateGiteaEndpoint", _a0, param)}
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) Run(run func(_a0 context.Context, param params.CreateGiteaEndpointParams)) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGiteaEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) Return(ghEndpoint params.ForgeEndpoint, err error) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Return(ghEndpoint, err)
+ return _c
+}
+
+func (_c *Store_CreateGiteaEndpoint_Call) RunAndReturn(run func(context.Context, params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error)) *Store_CreateGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGithubCredentials provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGithubCredentialsParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGithubCredentials'
+type Store_CreateGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// CreateGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGithubCredentialsParams
+func (_e *Store_Expecter) CreateGithubCredentials(ctx interface{}, param interface{}) *Store_CreateGithubCredentials_Call {
+ return &Store_CreateGithubCredentials_Call{Call: _e.mock.On("CreateGithubCredentials", ctx, param)}
+}
+
+func (_c *Store_CreateGithubCredentials_Call) Run(run func(ctx context.Context, param params.CreateGithubCredentialsParams)) *Store_CreateGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGithubCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_CreateGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateGithubCredentials_Call) RunAndReturn(run func(context.Context, params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)) *Store_CreateGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateGithubEndpoint provides a mock function with given fields: ctx, param
+func (_m *Store) CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.CreateGithubEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.CreateGithubEndpointParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateGithubEndpoint'
+type Store_CreateGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// CreateGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.CreateGithubEndpointParams
+func (_e *Store_Expecter) CreateGithubEndpoint(ctx interface{}, param interface{}) *Store_CreateGithubEndpoint_Call {
+ return &Store_CreateGithubEndpoint_Call{Call: _e.mock.On("CreateGithubEndpoint", ctx, param)}
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) Run(run func(ctx context.Context, param params.CreateGithubEndpointParams)) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.CreateGithubEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateGithubEndpoint_Call) RunAndReturn(run func(context.Context, params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)) *Store_CreateGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateInstance provides a mock function with given fields: ctx, poolID, param
func (_m *Store) CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error) {
ret := _m.Called(ctx, poolID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateInstance")
+ }
+
var r0 params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.CreateInstanceParams) (params.Instance, error)); ok {
@@ -124,23 +657,114 @@ func (_m *Store) CreateInstance(ctx context.Context, poolID string, param params
return r0, r1
}
-// CreateOrganization provides a mock function with given fields: ctx, name, credentialsName, webhookSecret
-func (_m *Store) CreateOrganization(ctx context.Context, name string, credentialsName string, webhookSecret string) (params.Organization, error) {
- ret := _m.Called(ctx, name, credentialsName, webhookSecret)
+// Store_CreateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateInstance'
+type Store_CreateInstance_Call struct {
+ *mock.Call
+}
+
+// CreateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - param params.CreateInstanceParams
+func (_e *Store_Expecter) CreateInstance(ctx interface{}, poolID interface{}, param interface{}) *Store_CreateInstance_Call {
+ return &Store_CreateInstance_Call{Call: _e.mock.On("CreateInstance", ctx, poolID, param)}
+}
+
+func (_c *Store_CreateInstance_Call) Run(run func(ctx context.Context, poolID string, param params.CreateInstanceParams)) *Store_CreateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_CreateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateInstance_Call) RunAndReturn(run func(context.Context, string, params.CreateInstanceParams) (params.Instance, error)) *Store_CreateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateOrUpdateJob provides a mock function with given fields: ctx, job
+func (_m *Store) CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error) {
+ ret := _m.Called(ctx, job)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrUpdateJob")
+ }
+
+ var r0 params.Job
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.Job) (params.Job, error)); ok {
+ return rf(ctx, job)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.Job) params.Job); ok {
+ r0 = rf(ctx, job)
+ } else {
+ r0 = ret.Get(0).(params.Job)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.Job) error); ok {
+ r1 = rf(ctx, job)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_CreateOrUpdateJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrUpdateJob'
+type Store_CreateOrUpdateJob_Call struct {
+ *mock.Call
+}
+
+// CreateOrUpdateJob is a helper method to define mock.On call
+// - ctx context.Context
+// - job params.Job
+func (_e *Store_Expecter) CreateOrUpdateJob(ctx interface{}, job interface{}) *Store_CreateOrUpdateJob_Call {
+ return &Store_CreateOrUpdateJob_Call{Call: _e.mock.On("CreateOrUpdateJob", ctx, job)}
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) Run(run func(ctx context.Context, job params.Job)) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Job))
+ })
+ return _c
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) Return(_a0 params.Job, _a1 error) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateOrUpdateJob_Call) RunAndReturn(run func(context.Context, params.Job) (params.Job, error)) *Store_CreateOrUpdateJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateOrganization provides a mock function with given fields: ctx, name, credentials, webhookSecret, poolBalancerType
+func (_m *Store) CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Organization, error) {
+ ret := _m.Called(ctx, name, credentials, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrganization")
+ }
var r0 params.Organization
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Organization, error)); ok {
- return rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Organization, error)); ok {
+ return rf(ctx, name, credentials, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Organization); ok {
- r0 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Organization); ok {
+ r0 = rf(ctx, name, credentials, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Organization)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
- r1 = rf(ctx, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, name, credentials, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -148,47 +772,59 @@ func (_m *Store) CreateOrganization(ctx context.Context, name string, credential
return r0, r1
}
-// CreateOrganizationPool provides a mock function with given fields: ctx, orgId, param
-func (_m *Store) CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, orgId, param)
-
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, orgId, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, orgId, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, orgId, param)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_CreateOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrganization'
+type Store_CreateOrganization_Call struct {
+ *mock.Call
}
-// CreateRepository provides a mock function with given fields: ctx, owner, name, credentialsName, webhookSecret
-func (_m *Store) CreateRepository(ctx context.Context, owner string, name string, credentialsName string, webhookSecret string) (params.Repository, error) {
- ret := _m.Called(ctx, owner, name, credentialsName, webhookSecret)
+// CreateOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - credentials params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateOrganization(ctx interface{}, name interface{}, credentials interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateOrganization_Call {
+ return &Store_CreateOrganization_Call{Call: _e.mock.On("CreateOrganization", ctx, name, credentials, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateOrganization_Call) Run(run func(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.ForgeCredentials), args[3].(string), args[4].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateOrganization_Call) Return(org params.Organization, err error) *Store_CreateOrganization_Call {
+ _c.Call.Return(org, err)
+ return _c
+}
+
+func (_c *Store_CreateOrganization_Call) RunAndReturn(run func(context.Context, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Organization, error)) *Store_CreateOrganization_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateRepository provides a mock function with given fields: ctx, owner, name, credentials, webhookSecret, poolBalancerType
+func (_m *Store) CreateRepository(ctx context.Context, owner string, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Repository, error) {
+ ret := _m.Called(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepository")
+ }
var r0 params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (params.Repository, error)); ok {
- return rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Repository, error)); ok {
+ return rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) params.Repository); ok {
- r0 = rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) params.Repository); ok {
+ r0 = rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
} else {
r0 = ret.Get(0).(params.Repository)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok {
- r1 = rf(ctx, owner, name, credentialsName, webhookSecret)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) error); ok {
+ r1 = rf(ctx, owner, name, credentials, webhookSecret, poolBalancerType)
} else {
r1 = ret.Error(1)
}
@@ -196,23 +832,60 @@ func (_m *Store) CreateRepository(ctx context.Context, owner string, name string
return r0, r1
}
-// CreateRepositoryPool provides a mock function with given fields: ctx, repoId, param
-func (_m *Store) CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, repoId, param)
+// Store_CreateRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateRepository'
+type Store_CreateRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// CreateRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - name string
+// - credentials params.ForgeCredentials
+// - webhookSecret string
+// - poolBalancerType params.PoolBalancerType
+func (_e *Store_Expecter) CreateRepository(ctx interface{}, owner interface{}, name interface{}, credentials interface{}, webhookSecret interface{}, poolBalancerType interface{}) *Store_CreateRepository_Call {
+ return &Store_CreateRepository_Call{Call: _e.mock.On("CreateRepository", ctx, owner, name, credentials, webhookSecret, poolBalancerType)}
+}
+
+func (_c *Store_CreateRepository_Call) Run(run func(ctx context.Context, owner string, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType)) *Store_CreateRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(params.ForgeCredentials), args[4].(string), args[5].(params.PoolBalancerType))
+ })
+ return _c
+}
+
+func (_c *Store_CreateRepository_Call) Return(param params.Repository, err error) *Store_CreateRepository_Call {
+ _c.Call.Return(param, err)
+ return _c
+}
+
+func (_c *Store_CreateRepository_Call) RunAndReturn(run func(context.Context, string, string, params.ForgeCredentials, string, params.PoolBalancerType) (params.Repository, error)) *Store_CreateRepository_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateScaleSetInstance provides a mock function with given fields: _a0, scaleSetID, param
+func (_m *Store) CreateScaleSetInstance(_a0 context.Context, scaleSetID uint, param params.CreateInstanceParams) (params.Instance, error) {
+ ret := _m.Called(_a0, scaleSetID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateScaleSetInstance")
+ }
+
+ var r0 params.Instance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, repoId, param)
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.CreateInstanceParams) (params.Instance, error)); ok {
+ return rf(_a0, scaleSetID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.CreatePoolParams) params.Pool); ok {
- r0 = rf(ctx, repoId, param)
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.CreateInstanceParams) params.Instance); ok {
+ r0 = rf(_a0, scaleSetID, param)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.Instance)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.CreatePoolParams) error); ok {
- r1 = rf(ctx, repoId, param)
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.CreateInstanceParams) error); ok {
+ r1 = rf(_a0, scaleSetID, param)
} else {
r1 = ret.Error(1)
}
@@ -220,10 +893,44 @@ func (_m *Store) CreateRepositoryPool(ctx context.Context, repoId string, param
return r0, r1
}
+// Store_CreateScaleSetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateScaleSetInstance'
+type Store_CreateScaleSetInstance_Call struct {
+ *mock.Call
+}
+
+// CreateScaleSetInstance is a helper method to define mock.On call
+// - _a0 context.Context
+// - scaleSetID uint
+// - param params.CreateInstanceParams
+func (_e *Store_Expecter) CreateScaleSetInstance(_a0 interface{}, scaleSetID interface{}, param interface{}) *Store_CreateScaleSetInstance_Call {
+ return &Store_CreateScaleSetInstance_Call{Call: _e.mock.On("CreateScaleSetInstance", _a0, scaleSetID, param)}
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) Run(run func(_a0 context.Context, scaleSetID uint, param params.CreateInstanceParams)) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) Return(instance params.Instance, err error) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Return(instance, err)
+ return _c
+}
+
+func (_c *Store_CreateScaleSetInstance_Call) RunAndReturn(run func(context.Context, uint, params.CreateInstanceParams) (params.Instance, error)) *Store_CreateScaleSetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateUser provides a mock function with given fields: ctx, user
func (_m *Store) CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error) {
ret := _m.Called(ctx, user)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.NewUserParams) (params.User, error)); ok {
@@ -244,10 +951,89 @@ func (_m *Store) CreateUser(ctx context.Context, user params.NewUserParams) (par
return r0, r1
}
+// Store_CreateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateUser'
+type Store_CreateUser_Call struct {
+ *mock.Call
+}
+
+// CreateUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user params.NewUserParams
+func (_e *Store_Expecter) CreateUser(ctx interface{}, user interface{}) *Store_CreateUser_Call {
+ return &Store_CreateUser_Call{Call: _e.mock.On("CreateUser", ctx, user)}
+}
+
+func (_c *Store_CreateUser_Call) Run(run func(ctx context.Context, user params.NewUserParams)) *Store_CreateUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.NewUserParams))
+ })
+ return _c
+}
+
+func (_c *Store_CreateUser_Call) Return(_a0 params.User, _a1 error) *Store_CreateUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_CreateUser_Call) RunAndReturn(run func(context.Context, params.NewUserParams) (params.User, error)) *Store_CreateUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteCompletedJobs provides a mock function with given fields: ctx
+func (_m *Store) DeleteCompletedJobs(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteCompletedJobs")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteCompletedJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCompletedJobs'
+type Store_DeleteCompletedJobs_Call struct {
+ *mock.Call
+}
+
+// DeleteCompletedJobs is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) DeleteCompletedJobs(ctx interface{}) *Store_DeleteCompletedJobs_Call {
+ return &Store_DeleteCompletedJobs_Call{Call: _e.mock.On("DeleteCompletedJobs", ctx)}
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) Run(run func(ctx context.Context)) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) Return(_a0 error) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteCompletedJobs_Call) RunAndReturn(run func(context.Context) error) *Store_DeleteCompletedJobs_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteEnterprise provides a mock function with given fields: ctx, enterpriseID
func (_m *Store) DeleteEnterprise(ctx context.Context, enterpriseID string) error {
ret := _m.Called(ctx, enterpriseID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEnterprise")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, enterpriseID)
@@ -258,13 +1044,46 @@ func (_m *Store) DeleteEnterprise(ctx context.Context, enterpriseID string) erro
return r0
}
-// DeleteEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID
-func (_m *Store) DeleteEnterprisePool(ctx context.Context, enterpriseID string, poolID string) error {
- ret := _m.Called(ctx, enterpriseID, poolID)
+// Store_DeleteEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEnterprise'
+type Store_DeleteEnterprise_Call struct {
+ *mock.Call
+}
+
+// DeleteEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+func (_e *Store_Expecter) DeleteEnterprise(ctx interface{}, enterpriseID interface{}) *Store_DeleteEnterprise_Call {
+ return &Store_DeleteEnterprise_Call{Call: _e.mock.On("DeleteEnterprise", ctx, enterpriseID)}
+}
+
+func (_c *Store_DeleteEnterprise_Call) Run(run func(ctx context.Context, enterpriseID string)) *Store_DeleteEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteEnterprise_Call) Return(_a0 error) *Store_DeleteEnterprise_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteEnterprise_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityPool provides a mock function with given fields: ctx, entity, poolID
+func (_m *Store) DeleteEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) error {
+ ret := _m.Called(ctx, entity, poolID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityPool")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) error); ok {
+ r0 = rf(ctx, entity, poolID)
} else {
r0 = ret.Error(0)
}
@@ -272,13 +1091,47 @@ func (_m *Store) DeleteEnterprisePool(ctx context.Context, enterpriseID string,
return r0
}
-// DeleteInstance provides a mock function with given fields: ctx, poolID, instanceName
-func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceName string) error {
- ret := _m.Called(ctx, poolID, instanceName)
+// Store_DeleteEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityPool'
+type Store_DeleteEntityPool_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+func (_e *Store_Expecter) DeleteEntityPool(ctx interface{}, entity interface{}, poolID interface{}) *Store_DeleteEntityPool_Call {
+ return &Store_DeleteEntityPool_Call{Call: _e.mock.On("DeleteEntityPool", ctx, entity, poolID)}
+}
+
+func (_c *Store_DeleteEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string)) *Store_DeleteEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteEntityPool_Call) Return(_a0 error) *Store_DeleteEntityPool_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string) error) *Store_DeleteEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGiteaCredentials provides a mock function with given fields: ctx, id
+func (_m *Store) DeleteGiteaCredentials(ctx context.Context, id uint) error {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGiteaCredentials")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, id)
} else {
r0 = ret.Error(0)
}
@@ -286,10 +1139,326 @@ func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceName
return r0
}
+// Store_DeleteGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGiteaCredentials'
+type Store_DeleteGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// DeleteGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+func (_e *Store_Expecter) DeleteGiteaCredentials(ctx interface{}, id interface{}) *Store_DeleteGiteaCredentials_Call {
+ return &Store_DeleteGiteaCredentials_Call{Call: _e.mock.On("DeleteGiteaCredentials", ctx, id)}
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) Run(run func(ctx context.Context, id uint)) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) Return(err error) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Store_DeleteGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGiteaEndpoint provides a mock function with given fields: _a0, name
+func (_m *Store) DeleteGiteaEndpoint(_a0 context.Context, name string) error {
+ ret := _m.Called(_a0, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGiteaEndpoint")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(_a0, name)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGiteaEndpoint'
+type Store_DeleteGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// DeleteGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+func (_e *Store_Expecter) DeleteGiteaEndpoint(_a0 interface{}, name interface{}) *Store_DeleteGiteaEndpoint_Call {
+ return &Store_DeleteGiteaEndpoint_Call{Call: _e.mock.On("DeleteGiteaEndpoint", _a0, name)}
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string)) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) Return(err error) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Store_DeleteGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGithubCredentials provides a mock function with given fields: ctx, id
+func (_m *Store) DeleteGithubCredentials(ctx context.Context, id uint) error {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGithubCredentials")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, id)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGithubCredentials'
+type Store_DeleteGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// DeleteGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+func (_e *Store_Expecter) DeleteGithubCredentials(ctx interface{}, id interface{}) *Store_DeleteGithubCredentials_Call {
+ return &Store_DeleteGithubCredentials_Call{Call: _e.mock.On("DeleteGithubCredentials", ctx, id)}
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) Run(run func(ctx context.Context, id uint)) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) Return(_a0 error) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteGithubCredentials_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteGithubEndpoint provides a mock function with given fields: ctx, name
+func (_m *Store) DeleteGithubEndpoint(ctx context.Context, name string) error {
+ ret := _m.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteGithubEndpoint")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(ctx, name)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteGithubEndpoint'
+type Store_DeleteGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// DeleteGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+func (_e *Store_Expecter) DeleteGithubEndpoint(ctx interface{}, name interface{}) *Store_DeleteGithubEndpoint_Call {
+ return &Store_DeleteGithubEndpoint_Call{Call: _e.mock.On("DeleteGithubEndpoint", ctx, name)}
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) Run(run func(ctx context.Context, name string)) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) Return(_a0 error) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteGithubEndpoint_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteInstance provides a mock function with given fields: ctx, poolID, instanceNameOrID
+func (_m *Store) DeleteInstance(ctx context.Context, poolID string, instanceNameOrID string) error {
+ ret := _m.Called(ctx, poolID, instanceNameOrID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstance")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
+ r0 = rf(ctx, poolID, instanceNameOrID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstance'
+type Store_DeleteInstance_Call struct {
+ *mock.Call
+}
+
+// DeleteInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - instanceNameOrID string
+func (_e *Store_Expecter) DeleteInstance(ctx interface{}, poolID interface{}, instanceNameOrID interface{}) *Store_DeleteInstance_Call {
+ return &Store_DeleteInstance_Call{Call: _e.mock.On("DeleteInstance", ctx, poolID, instanceNameOrID)}
+}
+
+func (_c *Store_DeleteInstance_Call) Run(run func(ctx context.Context, poolID string, instanceNameOrID string)) *Store_DeleteInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteInstance_Call) Return(_a0 error) *Store_DeleteInstance_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteInstance_Call) RunAndReturn(run func(context.Context, string, string) error) *Store_DeleteInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteInstanceByName provides a mock function with given fields: ctx, instanceName
+func (_m *Store) DeleteInstanceByName(ctx context.Context, instanceName string) error {
+ ret := _m.Called(ctx, instanceName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstanceByName")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(ctx, instanceName)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteInstanceByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstanceByName'
+type Store_DeleteInstanceByName_Call struct {
+ *mock.Call
+}
+
+// DeleteInstanceByName is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceName string
+func (_e *Store_Expecter) DeleteInstanceByName(ctx interface{}, instanceName interface{}) *Store_DeleteInstanceByName_Call {
+ return &Store_DeleteInstanceByName_Call{Call: _e.mock.On("DeleteInstanceByName", ctx, instanceName)}
+}
+
+func (_c *Store_DeleteInstanceByName_Call) Run(run func(ctx context.Context, instanceName string)) *Store_DeleteInstanceByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteInstanceByName_Call) Return(_a0 error) *Store_DeleteInstanceByName_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteInstanceByName_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteInstanceByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteJob provides a mock function with given fields: ctx, jobID
+func (_m *Store) DeleteJob(ctx context.Context, jobID int64) error {
+ ret := _m.Called(ctx, jobID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteJob")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, jobID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_DeleteJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteJob'
+type Store_DeleteJob_Call struct {
+ *mock.Call
+}
+
+// DeleteJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) DeleteJob(ctx interface{}, jobID interface{}) *Store_DeleteJob_Call {
+ return &Store_DeleteJob_Call{Call: _e.mock.On("DeleteJob", ctx, jobID)}
+}
+
+func (_c *Store_DeleteJob_Call) Run(run func(ctx context.Context, jobID int64)) *Store_DeleteJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteJob_Call) Return(_a0 error) *Store_DeleteJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteJob_Call) RunAndReturn(run func(context.Context, int64) error) *Store_DeleteJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteOrganization provides a mock function with given fields: ctx, orgID
func (_m *Store) DeleteOrganization(ctx context.Context, orgID string) error {
ret := _m.Called(ctx, orgID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrganization")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, orgID)
@@ -300,24 +1469,43 @@ func (_m *Store) DeleteOrganization(ctx context.Context, orgID string) error {
return r0
}
-// DeleteOrganizationPool provides a mock function with given fields: ctx, orgID, poolID
-func (_m *Store) DeleteOrganizationPool(ctx context.Context, orgID string, poolID string) error {
- ret := _m.Called(ctx, orgID, poolID)
+// Store_DeleteOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOrganization'
+type Store_DeleteOrganization_Call struct {
+ *mock.Call
+}
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, orgID, poolID)
- } else {
- r0 = ret.Error(0)
- }
+// DeleteOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+func (_e *Store_Expecter) DeleteOrganization(ctx interface{}, orgID interface{}) *Store_DeleteOrganization_Call {
+ return &Store_DeleteOrganization_Call{Call: _e.mock.On("DeleteOrganization", ctx, orgID)}
+}
- return r0
+func (_c *Store_DeleteOrganization_Call) Run(run func(ctx context.Context, orgID string)) *Store_DeleteOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteOrganization_Call) Return(_a0 error) *Store_DeleteOrganization_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteOrganization_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteOrganization_Call {
+ _c.Call.Return(run)
+ return _c
}
// DeletePoolByID provides a mock function with given fields: ctx, poolID
func (_m *Store) DeletePoolByID(ctx context.Context, poolID string) error {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeletePoolByID")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, poolID)
@@ -328,10 +1516,43 @@ func (_m *Store) DeletePoolByID(ctx context.Context, poolID string) error {
return r0
}
+// Store_DeletePoolByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeletePoolByID'
+type Store_DeletePoolByID_Call struct {
+ *mock.Call
+}
+
+// DeletePoolByID is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) DeletePoolByID(ctx interface{}, poolID interface{}) *Store_DeletePoolByID_Call {
+ return &Store_DeletePoolByID_Call{Call: _e.mock.On("DeletePoolByID", ctx, poolID)}
+}
+
+func (_c *Store_DeletePoolByID_Call) Run(run func(ctx context.Context, poolID string)) *Store_DeletePoolByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeletePoolByID_Call) Return(_a0 error) *Store_DeletePoolByID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeletePoolByID_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeletePoolByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteRepository provides a mock function with given fields: ctx, repoID
func (_m *Store) DeleteRepository(ctx context.Context, repoID string) error {
ret := _m.Called(ctx, repoID)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepository")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, repoID)
@@ -342,13 +1563,46 @@ func (_m *Store) DeleteRepository(ctx context.Context, repoID string) error {
return r0
}
-// DeleteRepositoryPool provides a mock function with given fields: ctx, repoID, poolID
-func (_m *Store) DeleteRepositoryPool(ctx context.Context, repoID string, poolID string) error {
- ret := _m.Called(ctx, repoID, poolID)
+// Store_DeleteRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRepository'
+type Store_DeleteRepository_Call struct {
+ *mock.Call
+}
+
+// DeleteRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+func (_e *Store_Expecter) DeleteRepository(ctx interface{}, repoID interface{}) *Store_DeleteRepository_Call {
+ return &Store_DeleteRepository_Call{Call: _e.mock.On("DeleteRepository", ctx, repoID)}
+}
+
+func (_c *Store_DeleteRepository_Call) Run(run func(ctx context.Context, repoID string)) *Store_DeleteRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteRepository_Call) Return(_a0 error) *Store_DeleteRepository_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_DeleteRepository_Call) RunAndReturn(run func(context.Context, string) error) *Store_DeleteRepository_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteScaleSetByID provides a mock function with given fields: ctx, scaleSetID
+func (_m *Store) DeleteScaleSetByID(ctx context.Context, scaleSetID uint) error {
+ ret := _m.Called(ctx, scaleSetID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteScaleSetByID")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
- r0 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) error); ok {
+ r0 = rf(ctx, scaleSetID)
} else {
r0 = ret.Error(0)
}
@@ -356,23 +1610,58 @@ func (_m *Store) DeleteRepositoryPool(ctx context.Context, repoID string, poolID
return r0
}
-// FindEnterprisePoolByTags provides a mock function with given fields: ctx, enterpriseID, tags
-func (_m *Store) FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, tags)
+// Store_DeleteScaleSetByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteScaleSetByID'
+type Store_DeleteScaleSetByID_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// DeleteScaleSetByID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+func (_e *Store_Expecter) DeleteScaleSetByID(ctx interface{}, scaleSetID interface{}) *Store_DeleteScaleSetByID_Call {
+ return &Store_DeleteScaleSetByID_Call{Call: _e.mock.On("DeleteScaleSetByID", ctx, scaleSetID)}
+}
+
+func (_c *Store_DeleteScaleSetByID_Call) Run(run func(ctx context.Context, scaleSetID uint)) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_DeleteScaleSetByID_Call) Return(err error) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Store_DeleteScaleSetByID_Call) RunAndReturn(run func(context.Context, uint) error) *Store_DeleteScaleSetByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// FindPoolsMatchingAllTags provides a mock function with given fields: ctx, entityType, entityID, tags
+func (_m *Store) FindPoolsMatchingAllTags(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error) {
+ ret := _m.Called(ctx, entityType, entityID, tags)
+
+ if len(ret) == 0 {
+ panic("no return value specified for FindPoolsMatchingAllTags")
+ }
+
+ var r0 []params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, []string) ([]params.Pool, error)); ok {
+ return rf(ctx, entityType, entityID, tags)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, []string) []params.Pool); ok {
+ r0 = rf(ctx, entityType, entityID, tags)
} else {
- r0 = ret.Get(0).(params.Pool)
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Pool)
+ }
}
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, enterpriseID, tags)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string, []string) error); ok {
+ r1 = rf(ctx, entityType, entityID, tags)
} else {
r1 = ret.Error(1)
}
@@ -380,23 +1669,58 @@ func (_m *Store) FindEnterprisePoolByTags(ctx context.Context, enterpriseID stri
return r0, r1
}
-// FindOrganizationPoolByTags provides a mock function with given fields: ctx, orgID, tags
-func (_m *Store) FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, tags)
+// Store_FindPoolsMatchingAllTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindPoolsMatchingAllTags'
+type Store_FindPoolsMatchingAllTags_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// FindPoolsMatchingAllTags is a helper method to define mock.On call
+// - ctx context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+// - tags []string
+func (_e *Store_Expecter) FindPoolsMatchingAllTags(ctx interface{}, entityType interface{}, entityID interface{}, tags interface{}) *Store_FindPoolsMatchingAllTags_Call {
+ return &Store_FindPoolsMatchingAllTags_Call{Call: _e.mock.On("FindPoolsMatchingAllTags", ctx, entityType, entityID, tags)}
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) Run(run func(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string)) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) Return(_a0 []params.Pool, _a1 error) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_FindPoolsMatchingAllTags_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string, []string) ([]params.Pool, error)) *Store_FindPoolsMatchingAllTags_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetAdminUser provides a mock function with given fields: ctx
+func (_m *Store) GetAdminUser(ctx context.Context) (params.User, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetAdminUser")
+ }
+
+ var r0 params.User
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, orgID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context) (params.User, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, orgID, tags)
+ if rf, ok := ret.Get(0).(func(context.Context) params.User); ok {
+ r0 = rf(ctx)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.User)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, orgID, tags)
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
@@ -404,47 +1728,55 @@ func (_m *Store) FindOrganizationPoolByTags(ctx context.Context, orgID string, t
return r0, r1
}
-// FindRepositoryPoolByTags provides a mock function with given fields: ctx, repoID, tags
-func (_m *Store) FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, tags)
-
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) (params.Pool, error)); ok {
- return rf(ctx, repoID, tags)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, []string) params.Pool); ok {
- r0 = rf(ctx, repoID, tags)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, []string) error); ok {
- r1 = rf(ctx, repoID, tags)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_GetAdminUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAdminUser'
+type Store_GetAdminUser_Call struct {
+ *mock.Call
}
-// GetEnterprise provides a mock function with given fields: ctx, name
-func (_m *Store) GetEnterprise(ctx context.Context, name string) (params.Enterprise, error) {
- ret := _m.Called(ctx, name)
+// GetAdminUser is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) GetAdminUser(ctx interface{}) *Store_GetAdminUser_Call {
+ return &Store_GetAdminUser_Call{Call: _e.mock.On("GetAdminUser", ctx)}
+}
+
+func (_c *Store_GetAdminUser_Call) Run(run func(ctx context.Context)) *Store_GetAdminUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_GetAdminUser_Call) Return(_a0 params.User, _a1 error) *Store_GetAdminUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetAdminUser_Call) RunAndReturn(run func(context.Context) (params.User, error)) *Store_GetAdminUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEnterprise provides a mock function with given fields: ctx, name, endpointName
+func (_m *Store) GetEnterprise(ctx context.Context, name string, endpointName string) (params.Enterprise, error) {
+ ret := _m.Called(ctx, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprise")
+ }
var r0 params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Enterprise, error)); ok {
- return rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Enterprise, error)); ok {
+ return rf(ctx, name, endpointName)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Enterprise); ok {
- r0 = rf(ctx, name)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Enterprise); ok {
+ r0 = rf(ctx, name, endpointName)
} else {
r0 = ret.Get(0).(params.Enterprise)
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, name)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+ r1 = rf(ctx, name, endpointName)
} else {
r1 = ret.Error(1)
}
@@ -452,10 +1784,44 @@ func (_m *Store) GetEnterprise(ctx context.Context, name string) (params.Enterpr
return r0, r1
}
+// Store_GetEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprise'
+type Store_GetEnterprise_Call struct {
+ *mock.Call
+}
+
+// GetEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetEnterprise(ctx interface{}, name interface{}, endpointName interface{}) *Store_GetEnterprise_Call {
+ return &Store_GetEnterprise_Call{Call: _e.mock.On("GetEnterprise", ctx, name, endpointName)}
+}
+
+func (_c *Store_GetEnterprise_Call) Run(run func(ctx context.Context, name string, endpointName string)) *Store_GetEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_GetEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEnterprise_Call) RunAndReturn(run func(context.Context, string, string) (params.Enterprise, error)) *Store_GetEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetEnterpriseByID provides a mock function with given fields: ctx, enterpriseID
func (_m *Store) GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error) {
ret := _m.Called(ctx, enterpriseID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterpriseByID")
+ }
+
var r0 params.Enterprise
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Enterprise, error)); ok {
@@ -476,23 +1842,56 @@ func (_m *Store) GetEnterpriseByID(ctx context.Context, enterpriseID string) (pa
return r0, r1
}
-// GetEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID
-func (_m *Store) GetEnterprisePool(ctx context.Context, enterpriseID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, poolID)
+// Store_GetEnterpriseByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterpriseByID'
+type Store_GetEnterpriseByID_Call struct {
+ *mock.Call
+}
+
+// GetEnterpriseByID is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+func (_e *Store_Expecter) GetEnterpriseByID(ctx interface{}, enterpriseID interface{}) *Store_GetEnterpriseByID_Call {
+ return &Store_GetEnterpriseByID_Call{Call: _e.mock.On("GetEnterpriseByID", ctx, enterpriseID)}
+}
+
+func (_c *Store_GetEnterpriseByID_Call) Run(run func(ctx context.Context, enterpriseID string)) *Store_GetEnterpriseByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEnterpriseByID_Call) Return(_a0 params.Enterprise, _a1 error) *Store_GetEnterpriseByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEnterpriseByID_Call) RunAndReturn(run func(context.Context, string) (params.Enterprise, error)) *Store_GetEnterpriseByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityPool provides a mock function with given fields: ctx, entity, poolID
+func (_m *Store) GetEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, poolID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityPool")
+ }
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) (params.Pool, error)); ok {
+ return rf(ctx, entity, poolID)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string) params.Pool); ok {
+ r0 = rf(ctx, entity, poolID)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, enterpriseID, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, string) error); ok {
+ r1 = rf(ctx, entity, poolID)
} else {
r1 = ret.Error(1)
}
@@ -500,23 +1899,231 @@ func (_m *Store) GetEnterprisePool(ctx context.Context, enterpriseID string, poo
return r0, r1
}
-// GetInstanceByName provides a mock function with given fields: ctx, instanceName
-func (_m *Store) GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error) {
- ret := _m.Called(ctx, instanceName)
+// Store_GetEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityPool'
+type Store_GetEntityPool_Call struct {
+ *mock.Call
+}
- var r0 params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Instance, error)); ok {
- return rf(ctx, instanceName)
+// GetEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+func (_e *Store_Expecter) GetEntityPool(ctx interface{}, entity interface{}, poolID interface{}) *Store_GetEntityPool_Call {
+ return &Store_GetEntityPool_Call{Call: _e.mock.On("GetEntityPool", ctx, entity, poolID)}
+}
+
+func (_c *Store_GetEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string)) *Store_GetEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_GetEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string) (params.Pool, error)) *Store_GetEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetForgeEntity provides a mock function with given fields: _a0, entityType, entityID
+func (_m *Store) GetForgeEntity(_a0 context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error) {
+ ret := _m.Called(_a0, entityType, entityID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetForgeEntity")
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Instance); ok {
- r0 = rf(ctx, instanceName)
+
+ var r0 params.ForgeEntity
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string) (params.ForgeEntity, error)); ok {
+ return rf(_a0, entityType, entityID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string) params.ForgeEntity); ok {
+ r0 = rf(_a0, entityType, entityID)
} else {
- r0 = ret.Get(0).(params.Instance)
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string) error); ok {
+ r1 = rf(_a0, entityType, entityID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetForgeEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetForgeEntity'
+type Store_GetForgeEntity_Call struct {
+ *mock.Call
+}
+
+// GetForgeEntity is a helper method to define mock.On call
+// - _a0 context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+func (_e *Store_Expecter) GetForgeEntity(_a0 interface{}, entityType interface{}, entityID interface{}) *Store_GetForgeEntity_Call {
+ return &Store_GetForgeEntity_Call{Call: _e.mock.On("GetForgeEntity", _a0, entityType, entityID)}
+}
+
+func (_c *Store_GetForgeEntity_Call) Run(run func(_a0 context.Context, entityType params.ForgeEntityType, entityID string)) *Store_GetForgeEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetForgeEntity_Call) Return(_a0 params.ForgeEntity, _a1 error) *Store_GetForgeEntity_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetForgeEntity_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string) (params.ForgeEntity, error)) *Store_GetForgeEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaCredentials provides a mock function with given fields: ctx, id, detailed
+func (_m *Store) GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, bool) error); ok {
+ r1 = rf(ctx, id, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaCredentials'
+type Store_GetGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// GetGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - detailed bool
+func (_e *Store_Expecter) GetGiteaCredentials(ctx interface{}, id interface{}, detailed interface{}) *Store_GetGiteaCredentials_Call {
+ return &Store_GetGiteaCredentials_Call{Call: _e.mock.On("GetGiteaCredentials", ctx, id, detailed)}
+}
+
+func (_c *Store_GetGiteaCredentials_Call) Run(run func(ctx context.Context, id uint, detailed bool)) *Store_GetGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGiteaCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint, bool) (params.ForgeCredentials, error)) *Store_GetGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaCredentialsByName provides a mock function with given fields: ctx, name, detailed
+func (_m *Store) GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, name, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaCredentialsByName")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, name, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, name, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {
+ r1 = rf(ctx, name, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGiteaCredentialsByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaCredentialsByName'
+type Store_GetGiteaCredentialsByName_Call struct {
+ *mock.Call
+}
+
+// GetGiteaCredentialsByName is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - detailed bool
+func (_e *Store_Expecter) GetGiteaCredentialsByName(ctx interface{}, name interface{}, detailed interface{}) *Store_GetGiteaCredentialsByName_Call {
+ return &Store_GetGiteaCredentialsByName_Call{Call: _e.mock.On("GetGiteaCredentialsByName", ctx, name, detailed)}
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) Run(run func(ctx context.Context, name string, detailed bool)) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaCredentialsByName_Call) RunAndReturn(run func(context.Context, string, bool) (params.ForgeCredentials, error)) *Store_GetGiteaCredentialsByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGiteaEndpoint provides a mock function with given fields: _a0, name
+func (_m *Store) GetGiteaEndpoint(_a0 context.Context, name string) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, name)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, name)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
}
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, instanceName)
+ r1 = rf(_a0, name)
} else {
r1 = ret.Error(1)
}
@@ -524,19 +2131,168 @@ func (_m *Store) GetInstanceByName(ctx context.Context, instanceName string) (pa
return r0, r1
}
-// GetOrganization provides a mock function with given fields: ctx, name
-func (_m *Store) GetOrganization(ctx context.Context, name string) (params.Organization, error) {
+// Store_GetGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGiteaEndpoint'
+type Store_GetGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// GetGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+func (_e *Store_Expecter) GetGiteaEndpoint(_a0 interface{}, name interface{}) *Store_GetGiteaEndpoint_Call {
+ return &Store_GetGiteaEndpoint_Call{Call: _e.mock.On("GetGiteaEndpoint", _a0, name)}
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string)) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string) (params.ForgeEndpoint, error)) *Store_GetGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGithubCredentials provides a mock function with given fields: ctx, id, detailed
+func (_m *Store) GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, bool) error); ok {
+ r1 = rf(ctx, id, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubCredentials'
+type Store_GetGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// GetGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - detailed bool
+func (_e *Store_Expecter) GetGithubCredentials(ctx interface{}, id interface{}, detailed interface{}) *Store_GetGithubCredentials_Call {
+ return &Store_GetGithubCredentials_Call{Call: _e.mock.On("GetGithubCredentials", ctx, id, detailed)}
+}
+
+func (_c *Store_GetGithubCredentials_Call) Run(run func(ctx context.Context, id uint, detailed bool)) *Store_GetGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubCredentials_Call) RunAndReturn(run func(context.Context, uint, bool) (params.ForgeCredentials, error)) *Store_GetGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGithubCredentialsByName provides a mock function with given fields: ctx, name, detailed
+func (_m *Store) GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, name, detailed)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubCredentialsByName")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, name, detailed)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, bool) params.ForgeCredentials); ok {
+ r0 = rf(ctx, name, detailed)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {
+ r1 = rf(ctx, name, detailed)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetGithubCredentialsByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubCredentialsByName'
+type Store_GetGithubCredentialsByName_Call struct {
+ *mock.Call
+}
+
+// GetGithubCredentialsByName is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - detailed bool
+func (_e *Store_Expecter) GetGithubCredentialsByName(ctx interface{}, name interface{}, detailed interface{}) *Store_GetGithubCredentialsByName_Call {
+ return &Store_GetGithubCredentialsByName_Call{Call: _e.mock.On("GetGithubCredentialsByName", ctx, name, detailed)}
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) Run(run func(ctx context.Context, name string, detailed bool)) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(bool))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubCredentialsByName_Call) RunAndReturn(run func(context.Context, string, bool) (params.ForgeCredentials, error)) *Store_GetGithubCredentialsByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetGithubEndpoint provides a mock function with given fields: ctx, name
+func (_m *Store) GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
ret := _m.Called(ctx, name)
- var r0 params.Organization
+ if len(ret) == 0 {
+ panic("no return value specified for GetGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Organization, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string) (params.ForgeEndpoint, error)); ok {
return rf(ctx, name)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Organization); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string) params.ForgeEndpoint); ok {
r0 = rf(ctx, name)
} else {
- r0 = ret.Get(0).(params.Organization)
+ r0 = ret.Get(0).(params.ForgeEndpoint)
}
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
@@ -548,10 +2304,215 @@ func (_m *Store) GetOrganization(ctx context.Context, name string) (params.Organ
return r0, r1
}
+// Store_GetGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGithubEndpoint'
+type Store_GetGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// GetGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+func (_e *Store_Expecter) GetGithubEndpoint(ctx interface{}, name interface{}) *Store_GetGithubEndpoint_Call {
+ return &Store_GetGithubEndpoint_Call{Call: _e.mock.On("GetGithubEndpoint", ctx, name)}
+}
+
+func (_c *Store_GetGithubEndpoint_Call) Run(run func(ctx context.Context, name string)) *Store_GetGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_GetGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetGithubEndpoint_Call) RunAndReturn(run func(context.Context, string) (params.ForgeEndpoint, error)) *Store_GetGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetInstance provides a mock function with given fields: ctx, instanceNameOrID
+func (_m *Store) GetInstance(ctx context.Context, instanceNameOrID string) (params.Instance, error) {
+ ret := _m.Called(ctx, instanceNameOrID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInstance")
+ }
+
+ var r0 params.Instance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (params.Instance, error)); ok {
+ return rf(ctx, instanceNameOrID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) params.Instance); ok {
+ r0 = rf(ctx, instanceNameOrID)
+ } else {
+ r0 = ret.Get(0).(params.Instance)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, instanceNameOrID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInstance'
+type Store_GetInstance_Call struct {
+ *mock.Call
+}
+
+// GetInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+func (_e *Store_Expecter) GetInstance(ctx interface{}, instanceNameOrID interface{}) *Store_GetInstance_Call {
+ return &Store_GetInstance_Call{Call: _e.mock.On("GetInstance", ctx, instanceNameOrID)}
+}
+
+func (_c *Store_GetInstance_Call) Run(run func(ctx context.Context, instanceNameOrID string)) *Store_GetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_GetInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetInstance_Call) RunAndReturn(run func(context.Context, string) (params.Instance, error)) *Store_GetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetJobByID provides a mock function with given fields: ctx, jobID
+func (_m *Store) GetJobByID(ctx context.Context, jobID int64) (params.Job, error) {
+ ret := _m.Called(ctx, jobID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetJobByID")
+ }
+
+ var r0 params.Job
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (params.Job, error)); ok {
+ return rf(ctx, jobID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) params.Job); ok {
+ r0 = rf(ctx, jobID)
+ } else {
+ r0 = ret.Get(0).(params.Job)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, jobID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetJobByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJobByID'
+type Store_GetJobByID_Call struct {
+ *mock.Call
+}
+
+// GetJobByID is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+func (_e *Store_Expecter) GetJobByID(ctx interface{}, jobID interface{}) *Store_GetJobByID_Call {
+ return &Store_GetJobByID_Call{Call: _e.mock.On("GetJobByID", ctx, jobID)}
+}
+
+func (_c *Store_GetJobByID_Call) Run(run func(ctx context.Context, jobID int64)) *Store_GetJobByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_GetJobByID_Call) Return(_a0 params.Job, _a1 error) *Store_GetJobByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetJobByID_Call) RunAndReturn(run func(context.Context, int64) (params.Job, error)) *Store_GetJobByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetOrganization provides a mock function with given fields: ctx, name, endpointName
+func (_m *Store) GetOrganization(ctx context.Context, name string, endpointName string) (params.Organization, error) {
+ ret := _m.Called(ctx, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrganization")
+ }
+
+ var r0 params.Organization
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Organization, error)); ok {
+ return rf(ctx, name, endpointName)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Organization); ok {
+ r0 = rf(ctx, name, endpointName)
+ } else {
+ r0 = ret.Get(0).(params.Organization)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+ r1 = rf(ctx, name, endpointName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_GetOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrganization'
+type Store_GetOrganization_Call struct {
+ *mock.Call
+}
+
+// GetOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetOrganization(ctx interface{}, name interface{}, endpointName interface{}) *Store_GetOrganization_Call {
+ return &Store_GetOrganization_Call{Call: _e.mock.On("GetOrganization", ctx, name, endpointName)}
+}
+
+func (_c *Store_GetOrganization_Call) Run(run func(ctx context.Context, name string, endpointName string)) *Store_GetOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetOrganization_Call) Return(_a0 params.Organization, _a1 error) *Store_GetOrganization_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetOrganization_Call) RunAndReturn(run func(context.Context, string, string) (params.Organization, error)) *Store_GetOrganization_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetOrganizationByID provides a mock function with given fields: ctx, orgID
func (_m *Store) GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error) {
ret := _m.Called(ctx, orgID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrganizationByID")
+ }
+
var r0 params.Organization
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Organization, error)); ok {
@@ -572,34 +2533,43 @@ func (_m *Store) GetOrganizationByID(ctx context.Context, orgID string) (params.
return r0, r1
}
-// GetOrganizationPool provides a mock function with given fields: ctx, orgID, poolID
-func (_m *Store) GetOrganizationPool(ctx context.Context, orgID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, poolID)
+// Store_GetOrganizationByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrganizationByID'
+type Store_GetOrganizationByID_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, orgID, poolID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, orgID, poolID)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// GetOrganizationByID is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+func (_e *Store_Expecter) GetOrganizationByID(ctx interface{}, orgID interface{}) *Store_GetOrganizationByID_Call {
+ return &Store_GetOrganizationByID_Call{Call: _e.mock.On("GetOrganizationByID", ctx, orgID)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, orgID, poolID)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_GetOrganizationByID_Call) Run(run func(ctx context.Context, orgID string)) *Store_GetOrganizationByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_GetOrganizationByID_Call) Return(_a0 params.Organization, _a1 error) *Store_GetOrganizationByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetOrganizationByID_Call) RunAndReturn(run func(context.Context, string) (params.Organization, error)) *Store_GetOrganizationByID_Call {
+ _c.Call.Return(run)
+ return _c
}
// GetPoolByID provides a mock function with given fields: ctx, poolID
func (_m *Store) GetPoolByID(ctx context.Context, poolID string) (params.Pool, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetPoolByID")
+ }
+
var r0 params.Pool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Pool, error)); ok {
@@ -620,23 +2590,56 @@ func (_m *Store) GetPoolByID(ctx context.Context, poolID string) (params.Pool, e
return r0, r1
}
-// GetPoolInstanceByName provides a mock function with given fields: ctx, poolID, instanceName
-func (_m *Store) GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error) {
- ret := _m.Called(ctx, poolID, instanceName)
+// Store_GetPoolByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPoolByID'
+type Store_GetPoolByID_Call struct {
+ *mock.Call
+}
- var r0 params.Instance
+// GetPoolByID is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) GetPoolByID(ctx interface{}, poolID interface{}) *Store_GetPoolByID_Call {
+ return &Store_GetPoolByID_Call{Call: _e.mock.On("GetPoolByID", ctx, poolID)}
+}
+
+func (_c *Store_GetPoolByID_Call) Run(run func(ctx context.Context, poolID string)) *Store_GetPoolByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetPoolByID_Call) Return(_a0 params.Pool, _a1 error) *Store_GetPoolByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetPoolByID_Call) RunAndReturn(run func(context.Context, string) (params.Pool, error)) *Store_GetPoolByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetRepository provides a mock function with given fields: ctx, owner, name, endpointName
+func (_m *Store) GetRepository(ctx context.Context, owner string, name string, endpointName string) (params.Repository, error) {
+ ret := _m.Called(ctx, owner, name, endpointName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepository")
+ }
+
+ var r0 params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Instance, error)); ok {
- return rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (params.Repository, error)); ok {
+ return rf(ctx, owner, name, endpointName)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Instance); ok {
- r0 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, string) params.Repository); ok {
+ r0 = rf(ctx, owner, name, endpointName)
} else {
- r0 = ret.Get(0).(params.Instance)
+ r0 = ret.Get(0).(params.Repository)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, poolID, instanceName)
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
+ r1 = rf(ctx, owner, name, endpointName)
} else {
r1 = ret.Error(1)
}
@@ -644,34 +2647,45 @@ func (_m *Store) GetPoolInstanceByName(ctx context.Context, poolID string, insta
return r0, r1
}
-// GetRepository provides a mock function with given fields: ctx, owner, name
-func (_m *Store) GetRepository(ctx context.Context, owner string, name string) (params.Repository, error) {
- ret := _m.Called(ctx, owner, name)
+// Store_GetRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepository'
+type Store_GetRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Repository
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Repository, error)); ok {
- return rf(ctx, owner, name)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Repository); ok {
- r0 = rf(ctx, owner, name)
- } else {
- r0 = ret.Get(0).(params.Repository)
- }
+// GetRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - name string
+// - endpointName string
+func (_e *Store_Expecter) GetRepository(ctx interface{}, owner interface{}, name interface{}, endpointName interface{}) *Store_GetRepository_Call {
+ return &Store_GetRepository_Call{Call: _e.mock.On("GetRepository", ctx, owner, name, endpointName)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, owner, name)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_GetRepository_Call) Run(run func(ctx context.Context, owner string, name string, endpointName string)) *Store_GetRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_GetRepository_Call) Return(_a0 params.Repository, _a1 error) *Store_GetRepository_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetRepository_Call) RunAndReturn(run func(context.Context, string, string, string) (params.Repository, error)) *Store_GetRepository_Call {
+ _c.Call.Return(run)
+ return _c
}
// GetRepositoryByID provides a mock function with given fields: ctx, repoID
func (_m *Store) GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error) {
ret := _m.Called(ctx, repoID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepositoryByID")
+ }
+
var r0 params.Repository
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.Repository, error)); ok {
@@ -692,23 +2706,56 @@ func (_m *Store) GetRepositoryByID(ctx context.Context, repoID string) (params.R
return r0, r1
}
-// GetRepositoryPool provides a mock function with given fields: ctx, repoID, poolID
-func (_m *Store) GetRepositoryPool(ctx context.Context, repoID string, poolID string) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, poolID)
+// Store_GetRepositoryByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepositoryByID'
+type Store_GetRepositoryByID_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
+// GetRepositoryByID is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+func (_e *Store_Expecter) GetRepositoryByID(ctx interface{}, repoID interface{}) *Store_GetRepositoryByID_Call {
+ return &Store_GetRepositoryByID_Call{Call: _e.mock.On("GetRepositoryByID", ctx, repoID)}
+}
+
+func (_c *Store_GetRepositoryByID_Call) Run(run func(ctx context.Context, repoID string)) *Store_GetRepositoryByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetRepositoryByID_Call) Return(_a0 params.Repository, _a1 error) *Store_GetRepositoryByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetRepositoryByID_Call) RunAndReturn(run func(context.Context, string) (params.Repository, error)) *Store_GetRepositoryByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetScaleSetByID provides a mock function with given fields: ctx, scaleSet
+func (_m *Store) GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error) {
+ ret := _m.Called(ctx, scaleSet)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetScaleSetByID")
+ }
+
+ var r0 params.ScaleSet
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (params.Pool, error)); ok {
- return rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) (params.ScaleSet, error)); ok {
+ return rf(ctx, scaleSet)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) params.Pool); ok {
- r0 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(0).(func(context.Context, uint) params.ScaleSet); ok {
+ r0 = rf(ctx, scaleSet)
} else {
- r0 = ret.Get(0).(params.Pool)
+ r0 = ret.Get(0).(params.ScaleSet)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
- r1 = rf(ctx, repoID, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok {
+ r1 = rf(ctx, scaleSet)
} else {
r1 = ret.Error(1)
}
@@ -716,10 +2763,43 @@ func (_m *Store) GetRepositoryPool(ctx context.Context, repoID string, poolID st
return r0, r1
}
+// Store_GetScaleSetByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetScaleSetByID'
+type Store_GetScaleSetByID_Call struct {
+ *mock.Call
+}
+
+// GetScaleSetByID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSet uint
+func (_e *Store_Expecter) GetScaleSetByID(ctx interface{}, scaleSet interface{}) *Store_GetScaleSetByID_Call {
+ return &Store_GetScaleSetByID_Call{Call: _e.mock.On("GetScaleSetByID", ctx, scaleSet)}
+}
+
+func (_c *Store_GetScaleSetByID_Call) Run(run func(ctx context.Context, scaleSet uint)) *Store_GetScaleSetByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_GetScaleSetByID_Call) Return(_a0 params.ScaleSet, _a1 error) *Store_GetScaleSetByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetScaleSetByID_Call) RunAndReturn(run func(context.Context, uint) (params.ScaleSet, error)) *Store_GetScaleSetByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetUser provides a mock function with given fields: ctx, user
func (_m *Store) GetUser(ctx context.Context, user string) (params.User, error) {
ret := _m.Called(ctx, user)
+ if len(ret) == 0 {
+ panic("no return value specified for GetUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.User, error)); ok {
@@ -740,10 +2820,43 @@ func (_m *Store) GetUser(ctx context.Context, user string) (params.User, error)
return r0, r1
}
+// Store_GetUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUser'
+type Store_GetUser_Call struct {
+ *mock.Call
+}
+
+// GetUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user string
+func (_e *Store_Expecter) GetUser(ctx interface{}, user interface{}) *Store_GetUser_Call {
+ return &Store_GetUser_Call{Call: _e.mock.On("GetUser", ctx, user)}
+}
+
+func (_c *Store_GetUser_Call) Run(run func(ctx context.Context, user string)) *Store_GetUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetUser_Call) Return(_a0 params.User, _a1 error) *Store_GetUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetUser_Call) RunAndReturn(run func(context.Context, string) (params.User, error)) *Store_GetUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetUserByID provides a mock function with given fields: ctx, userID
func (_m *Store) GetUserByID(ctx context.Context, userID string) (params.User, error) {
ret := _m.Called(ctx, userID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetUserByID")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (params.User, error)); ok {
@@ -764,10 +2877,43 @@ func (_m *Store) GetUserByID(ctx context.Context, userID string) (params.User, e
return r0, r1
}
+// Store_GetUserByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByID'
+type Store_GetUserByID_Call struct {
+ *mock.Call
+}
+
+// GetUserByID is a helper method to define mock.On call
+// - ctx context.Context
+// - userID string
+func (_e *Store_Expecter) GetUserByID(ctx interface{}, userID interface{}) *Store_GetUserByID_Call {
+ return &Store_GetUserByID_Call{Call: _e.mock.On("GetUserByID", ctx, userID)}
+}
+
+func (_c *Store_GetUserByID_Call) Run(run func(ctx context.Context, userID string)) *Store_GetUserByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_GetUserByID_Call) Return(_a0 params.User, _a1 error) *Store_GetUserByID_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_GetUserByID_Call) RunAndReturn(run func(context.Context, string) (params.User, error)) *Store_GetUserByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// HasAdminUser provides a mock function with given fields: ctx
func (_m *Store) HasAdminUser(ctx context.Context) bool {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for HasAdminUser")
+ }
+
var r0 bool
if rf, ok := ret.Get(0).(func(context.Context) bool); ok {
r0 = rf(ctx)
@@ -778,10 +2924,42 @@ func (_m *Store) HasAdminUser(ctx context.Context) bool {
return r0
}
-// InitController provides a mock function with given fields:
+// Store_HasAdminUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasAdminUser'
+type Store_HasAdminUser_Call struct {
+ *mock.Call
+}
+
+// HasAdminUser is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) HasAdminUser(ctx interface{}) *Store_HasAdminUser_Call {
+ return &Store_HasAdminUser_Call{Call: _e.mock.On("HasAdminUser", ctx)}
+}
+
+func (_c *Store_HasAdminUser_Call) Run(run func(ctx context.Context)) *Store_HasAdminUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_HasAdminUser_Call) Return(_a0 bool) *Store_HasAdminUser_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_HasAdminUser_Call) RunAndReturn(run func(context.Context) bool) *Store_HasAdminUser_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InitController provides a mock function with no fields
func (_m *Store) InitController() (params.ControllerInfo, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for InitController")
+ }
+
var r0 params.ControllerInfo
var r1 error
if rf, ok := ret.Get(0).(func() (params.ControllerInfo, error)); ok {
@@ -802,10 +2980,41 @@ func (_m *Store) InitController() (params.ControllerInfo, error) {
return r0, r1
}
+// Store_InitController_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitController'
+type Store_InitController_Call struct {
+ *mock.Call
+}
+
+// InitController is a helper method to define mock.On call
+func (_e *Store_Expecter) InitController() *Store_InitController_Call {
+ return &Store_InitController_Call{Call: _e.mock.On("InitController")}
+}
+
+func (_c *Store_InitController_Call) Run(run func()) *Store_InitController_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Store_InitController_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_InitController_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_InitController_Call) RunAndReturn(run func() (params.ControllerInfo, error)) *Store_InitController_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListAllInstances provides a mock function with given fields: ctx
func (_m *Store) ListAllInstances(ctx context.Context) ([]params.Instance, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllInstances")
+ }
+
var r0 []params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]params.Instance, error)); ok {
@@ -828,10 +3037,100 @@ func (_m *Store) ListAllInstances(ctx context.Context) ([]params.Instance, error
return r0, r1
}
+// Store_ListAllInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllInstances'
+type Store_ListAllInstances_Call struct {
+ *mock.Call
+}
+
+// ListAllInstances is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllInstances(ctx interface{}) *Store_ListAllInstances_Call {
+ return &Store_ListAllInstances_Call{Call: _e.mock.On("ListAllInstances", ctx)}
+}
+
+func (_c *Store_ListAllInstances_Call) Run(run func(ctx context.Context)) *Store_ListAllInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListAllInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllInstances_Call) RunAndReturn(run func(context.Context) ([]params.Instance, error)) *Store_ListAllInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListAllJobs provides a mock function with given fields: ctx
+func (_m *Store) ListAllJobs(ctx context.Context) ([]params.Job, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllJobs")
+ }
+
+ var r0 []params.Job
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.Job, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.Job); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Job)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListAllJobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllJobs'
+type Store_ListAllJobs_Call struct {
+ *mock.Call
+}
+
+// ListAllJobs is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllJobs(ctx interface{}) *Store_ListAllJobs_Call {
+ return &Store_ListAllJobs_Call{Call: _e.mock.On("ListAllJobs", ctx)}
+}
+
+func (_c *Store_ListAllJobs_Call) Run(run func(ctx context.Context)) *Store_ListAllJobs_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllJobs_Call) Return(_a0 []params.Job, _a1 error) *Store_ListAllJobs_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllJobs_Call) RunAndReturn(run func(context.Context) ([]params.Job, error)) *Store_ListAllJobs_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListAllPools provides a mock function with given fields: ctx
func (_m *Store) ListAllPools(ctx context.Context) ([]params.Pool, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllPools")
+ }
+
var r0 []params.Pool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]params.Pool, error)); ok {
@@ -854,77 +3153,115 @@ func (_m *Store) ListAllPools(ctx context.Context) ([]params.Pool, error) {
return r0, r1
}
-// ListEnterpriseInstances provides a mock function with given fields: ctx, enterpriseID
-func (_m *Store) ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, enterpriseID)
-
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, enterpriseID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, enterpriseID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, enterpriseID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListAllPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllPools'
+type Store_ListAllPools_Call struct {
+ *mock.Call
}
-// ListEnterprisePools provides a mock function with given fields: ctx, enterpriseID
-func (_m *Store) ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID)
-
- var r0 []params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, enterpriseID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, enterpriseID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Pool)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, enterpriseID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// ListAllPools is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllPools(ctx interface{}) *Store_ListAllPools_Call {
+ return &Store_ListAllPools_Call{Call: _e.mock.On("ListAllPools", ctx)}
}
-// ListEnterprises provides a mock function with given fields: ctx
-func (_m *Store) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (_c *Store_ListAllPools_Call) Run(run func(ctx context.Context)) *Store_ListAllPools_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllPools_Call) Return(_a0 []params.Pool, _a1 error) *Store_ListAllPools_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllPools_Call) RunAndReturn(run func(context.Context) ([]params.Pool, error)) *Store_ListAllPools_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListAllScaleSets provides a mock function with given fields: ctx
+func (_m *Store) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for ListAllScaleSets")
+ }
+
+ var r0 []params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ScaleSet, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ScaleSet); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ScaleSet)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListAllScaleSets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAllScaleSets'
+type Store_ListAllScaleSets_Call struct {
+ *mock.Call
+}
+
+// ListAllScaleSets is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListAllScaleSets(ctx interface{}) *Store_ListAllScaleSets_Call {
+ return &Store_ListAllScaleSets_Call{Call: _e.mock.On("ListAllScaleSets", ctx)}
+}
+
+func (_c *Store_ListAllScaleSets_Call) Run(run func(ctx context.Context)) *Store_ListAllScaleSets_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListAllScaleSets_Call) Return(_a0 []params.ScaleSet, _a1 error) *Store_ListAllScaleSets_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListAllScaleSets_Call) RunAndReturn(run func(context.Context) ([]params.ScaleSet, error)) *Store_ListAllScaleSets_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEnterprises provides a mock function with given fields: ctx, filter
+func (_m *Store) ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEnterprises")
+ }
+
var r0 []params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Enterprise, error)); ok {
- return rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.EnterpriseFilter) ([]params.Enterprise, error)); ok {
+ return rf(ctx, filter)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Enterprise); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.EnterpriseFilter) []params.Enterprise); ok {
+ r0 = rf(ctx, filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Enterprise)
}
}
- if rf, ok := ret.Get(1).(func(context.Context) error); ok {
- r1 = rf(ctx)
+ if rf, ok := ret.Get(1).(func(context.Context, params.EnterpriseFilter) error); ok {
+ r1 = rf(ctx, filter)
} else {
r1 = ret.Error(1)
}
@@ -932,51 +3269,58 @@ func (_m *Store) ListEnterprises(ctx context.Context) ([]params.Enterprise, erro
return r0, r1
}
-// ListInstanceEvents provides a mock function with given fields: ctx, instanceID, eventType, eventLevel
-func (_m *Store) ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error) {
- ret := _m.Called(ctx, instanceID, eventType, eventLevel)
-
- var r0 []params.StatusMessage
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel) ([]params.StatusMessage, error)); ok {
- return rf(ctx, instanceID, eventType, eventLevel)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, params.EventType, params.EventLevel) []params.StatusMessage); ok {
- r0 = rf(ctx, instanceID, eventType, eventLevel)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.StatusMessage)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, params.EventType, params.EventLevel) error); ok {
- r1 = rf(ctx, instanceID, eventType, eventLevel)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListEnterprises_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEnterprises'
+type Store_ListEnterprises_Call struct {
+ *mock.Call
}
-// ListOrgInstances provides a mock function with given fields: ctx, orgID
-func (_m *Store) ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, orgID)
+// ListEnterprises is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.EnterpriseFilter
+func (_e *Store_Expecter) ListEnterprises(ctx interface{}, filter interface{}) *Store_ListEnterprises_Call {
+ return &Store_ListEnterprises_Call{Call: _e.mock.On("ListEnterprises", ctx, filter)}
+}
+
+func (_c *Store_ListEnterprises_Call) Run(run func(ctx context.Context, filter params.EnterpriseFilter)) *Store_ListEnterprises_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.EnterpriseFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListEnterprises_Call) Return(_a0 []params.Enterprise, _a1 error) *Store_ListEnterprises_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEnterprises_Call) RunAndReturn(run func(context.Context, params.EnterpriseFilter) ([]params.Enterprise, error)) *Store_ListEnterprises_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityInstances provides a mock function with given fields: ctx, entity
+func (_m *Store) ListEntityInstances(ctx context.Context, entity params.ForgeEntity) ([]params.Instance, error) {
+ ret := _m.Called(ctx, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityInstances")
+ }
var r0 []params.Instance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, orgID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.Instance, error)); ok {
+ return rf(ctx, entity)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, orgID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.Instance); ok {
+ r0 = rf(ctx, entity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Instance)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, orgID)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(ctx, entity)
} else {
r1 = ret.Error(1)
}
@@ -984,25 +3328,119 @@ func (_m *Store) ListOrgInstances(ctx context.Context, orgID string) ([]params.I
return r0, r1
}
-// ListOrgPools provides a mock function with given fields: ctx, orgID
-func (_m *Store) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, orgID)
+// Store_ListEntityInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityInstances'
+type Store_ListEntityInstances_Call struct {
+ *mock.Call
+}
+
+// ListEntityInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityInstances(ctx interface{}, entity interface{}) *Store_ListEntityInstances_Call {
+ return &Store_ListEntityInstances_Call{Call: _e.mock.On("ListEntityInstances", ctx, entity)}
+}
+
+func (_c *Store_ListEntityInstances_Call) Run(run func(ctx context.Context, entity params.ForgeEntity)) *Store_ListEntityInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListEntityInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityInstances_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.Instance, error)) *Store_ListEntityInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityJobsByStatus provides a mock function with given fields: ctx, entityType, entityID, status
+func (_m *Store) ListEntityJobsByStatus(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error) {
+ ret := _m.Called(ctx, entityType, entityID, status)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityJobsByStatus")
+ }
+
+ var r0 []params.Job
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) ([]params.Job, error)); ok {
+ return rf(ctx, entityType, entityID, status)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) []params.Job); ok {
+ r0 = rf(ctx, entityType, entityID, status)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Job)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntityType, string, params.JobStatus) error); ok {
+ r1 = rf(ctx, entityType, entityID, status)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListEntityJobsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityJobsByStatus'
+type Store_ListEntityJobsByStatus_Call struct {
+ *mock.Call
+}
+
+// ListEntityJobsByStatus is a helper method to define mock.On call
+// - ctx context.Context
+// - entityType params.ForgeEntityType
+// - entityID string
+// - status params.JobStatus
+func (_e *Store_Expecter) ListEntityJobsByStatus(ctx interface{}, entityType interface{}, entityID interface{}, status interface{}) *Store_ListEntityJobsByStatus_Call {
+ return &Store_ListEntityJobsByStatus_Call{Call: _e.mock.On("ListEntityJobsByStatus", ctx, entityType, entityID, status)}
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) Run(run func(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus)) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntityType), args[2].(string), args[3].(params.JobStatus))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) Return(_a0 []params.Job, _a1 error) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityJobsByStatus_Call) RunAndReturn(run func(context.Context, params.ForgeEntityType, string, params.JobStatus) ([]params.Job, error)) *Store_ListEntityJobsByStatus_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityPools provides a mock function with given fields: ctx, entity
+func (_m *Store) ListEntityPools(ctx context.Context, entity params.ForgeEntity) ([]params.Pool, error) {
+ ret := _m.Called(ctx, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityPools")
+ }
var r0 []params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, orgID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.Pool, error)); ok {
+ return rf(ctx, entity)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, orgID)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.Pool); ok {
+ r0 = rf(ctx, entity)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Pool)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, orgID)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(ctx, entity)
} else {
r1 = ret.Error(1)
}
@@ -1010,20 +3448,112 @@ func (_m *Store) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool,
return r0, r1
}
-// ListOrganizations provides a mock function with given fields: ctx
-func (_m *Store) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
+// Store_ListEntityPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityPools'
+type Store_ListEntityPools_Call struct {
+ *mock.Call
+}
+
+// ListEntityPools is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityPools(ctx interface{}, entity interface{}) *Store_ListEntityPools_Call {
+ return &Store_ListEntityPools_Call{Call: _e.mock.On("ListEntityPools", ctx, entity)}
+}
+
+func (_c *Store_ListEntityPools_Call) Run(run func(ctx context.Context, entity params.ForgeEntity)) *Store_ListEntityPools_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityPools_Call) Return(_a0 []params.Pool, _a1 error) *Store_ListEntityPools_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityPools_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.Pool, error)) *Store_ListEntityPools_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityScaleSets provides a mock function with given fields: _a0, entity
+func (_m *Store) ListEntityScaleSets(_a0 context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityScaleSets")
+ }
+
+ var r0 []params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) ([]params.ScaleSet, error)); ok {
+ return rf(_a0, entity)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity) []params.ScaleSet); ok {
+ r0 = rf(_a0, entity)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ScaleSet)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity) error); ok {
+ r1 = rf(_a0, entity)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListEntityScaleSets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityScaleSets'
+type Store_ListEntityScaleSets_Call struct {
+ *mock.Call
+}
+
+// ListEntityScaleSets is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+func (_e *Store_Expecter) ListEntityScaleSets(_a0 interface{}, entity interface{}) *Store_ListEntityScaleSets_Call {
+ return &Store_ListEntityScaleSets_Call{Call: _e.mock.On("ListEntityScaleSets", _a0, entity)}
+}
+
+func (_c *Store_ListEntityScaleSets_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity)) *Store_ListEntityScaleSets_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity))
+ })
+ return _c
+}
+
+func (_c *Store_ListEntityScaleSets_Call) Return(_a0 []params.ScaleSet, _a1 error) *Store_ListEntityScaleSets_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListEntityScaleSets_Call) RunAndReturn(run func(context.Context, params.ForgeEntity) ([]params.ScaleSet, error)) *Store_ListEntityScaleSets_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGiteaCredentials provides a mock function with given fields: ctx
+func (_m *Store) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
ret := _m.Called(ctx)
- var r0 []params.Organization
+ if len(ret) == 0 {
+ panic("no return value specified for ListGiteaCredentials")
+ }
+
+ var r0 []params.ForgeCredentials
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Organization, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeCredentials, error)); ok {
return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Organization); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeCredentials); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Organization)
+ r0 = ret.Get(0).([]params.ForgeCredentials)
}
}
@@ -1036,10 +3566,334 @@ func (_m *Store) ListOrganizations(ctx context.Context) ([]params.Organization,
return r0, r1
}
+// Store_ListGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGiteaCredentials'
+type Store_ListGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// ListGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGiteaCredentials(ctx interface{}) *Store_ListGiteaCredentials_Call {
+ return &Store_ListGiteaCredentials_Call{Call: _e.mock.On("ListGiteaCredentials", ctx)}
+}
+
+func (_c *Store_ListGiteaCredentials_Call) Run(run func(ctx context.Context)) *Store_ListGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGiteaCredentials_Call) Return(_a0 []params.ForgeCredentials, _a1 error) *Store_ListGiteaCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGiteaCredentials_Call) RunAndReturn(run func(context.Context) ([]params.ForgeCredentials, error)) *Store_ListGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGiteaEndpoints provides a mock function with given fields: _a0
+func (_m *Store) ListGiteaEndpoints(_a0 context.Context) ([]params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGiteaEndpoints")
+ }
+
+ var r0 []params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeEndpoint, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeEndpoint); ok {
+ r0 = rf(_a0)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeEndpoint)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGiteaEndpoints_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGiteaEndpoints'
+type Store_ListGiteaEndpoints_Call struct {
+ *mock.Call
+}
+
+// ListGiteaEndpoints is a helper method to define mock.On call
+// - _a0 context.Context
+func (_e *Store_Expecter) ListGiteaEndpoints(_a0 interface{}) *Store_ListGiteaEndpoints_Call {
+ return &Store_ListGiteaEndpoints_Call{Call: _e.mock.On("ListGiteaEndpoints", _a0)}
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) Run(run func(_a0 context.Context)) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) Return(_a0 []params.ForgeEndpoint, _a1 error) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGiteaEndpoints_Call) RunAndReturn(run func(context.Context) ([]params.ForgeEndpoint, error)) *Store_ListGiteaEndpoints_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGithubCredentials provides a mock function with given fields: ctx
+func (_m *Store) ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGithubCredentials")
+ }
+
+ var r0 []params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeCredentials, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeCredentials); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeCredentials)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGithubCredentials'
+type Store_ListGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// ListGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGithubCredentials(ctx interface{}) *Store_ListGithubCredentials_Call {
+ return &Store_ListGithubCredentials_Call{Call: _e.mock.On("ListGithubCredentials", ctx)}
+}
+
+func (_c *Store_ListGithubCredentials_Call) Run(run func(ctx context.Context)) *Store_ListGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGithubCredentials_Call) Return(_a0 []params.ForgeCredentials, _a1 error) *Store_ListGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGithubCredentials_Call) RunAndReturn(run func(context.Context) ([]params.ForgeCredentials, error)) *Store_ListGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListGithubEndpoints provides a mock function with given fields: ctx
+func (_m *Store) ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListGithubEndpoints")
+ }
+
+ var r0 []params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]params.ForgeEndpoint, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []params.ForgeEndpoint); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.ForgeEndpoint)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListGithubEndpoints_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListGithubEndpoints'
+type Store_ListGithubEndpoints_Call struct {
+ *mock.Call
+}
+
+// ListGithubEndpoints is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *Store_Expecter) ListGithubEndpoints(ctx interface{}) *Store_ListGithubEndpoints_Call {
+ return &Store_ListGithubEndpoints_Call{Call: _e.mock.On("ListGithubEndpoints", ctx)}
+}
+
+func (_c *Store_ListGithubEndpoints_Call) Run(run func(ctx context.Context)) *Store_ListGithubEndpoints_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *Store_ListGithubEndpoints_Call) Return(_a0 []params.ForgeEndpoint, _a1 error) *Store_ListGithubEndpoints_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListGithubEndpoints_Call) RunAndReturn(run func(context.Context) ([]params.ForgeEndpoint, error)) *Store_ListGithubEndpoints_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListJobsByStatus provides a mock function with given fields: ctx, status
+func (_m *Store) ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error) {
+ ret := _m.Called(ctx, status)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListJobsByStatus")
+ }
+
+ var r0 []params.Job
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.JobStatus) ([]params.Job, error)); ok {
+ return rf(ctx, status)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.JobStatus) []params.Job); ok {
+ r0 = rf(ctx, status)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Job)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.JobStatus) error); ok {
+ r1 = rf(ctx, status)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListJobsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListJobsByStatus'
+type Store_ListJobsByStatus_Call struct {
+ *mock.Call
+}
+
+// ListJobsByStatus is a helper method to define mock.On call
+// - ctx context.Context
+// - status params.JobStatus
+func (_e *Store_Expecter) ListJobsByStatus(ctx interface{}, status interface{}) *Store_ListJobsByStatus_Call {
+ return &Store_ListJobsByStatus_Call{Call: _e.mock.On("ListJobsByStatus", ctx, status)}
+}
+
+func (_c *Store_ListJobsByStatus_Call) Run(run func(ctx context.Context, status params.JobStatus)) *Store_ListJobsByStatus_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.JobStatus))
+ })
+ return _c
+}
+
+func (_c *Store_ListJobsByStatus_Call) Return(_a0 []params.Job, _a1 error) *Store_ListJobsByStatus_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListJobsByStatus_Call) RunAndReturn(run func(context.Context, params.JobStatus) ([]params.Job, error)) *Store_ListJobsByStatus_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListOrganizations provides a mock function with given fields: ctx, filter
+func (_m *Store) ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListOrganizations")
+ }
+
+ var r0 []params.Organization
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.OrganizationFilter) ([]params.Organization, error)); ok {
+ return rf(ctx, filter)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.OrganizationFilter) []params.Organization); ok {
+ r0 = rf(ctx, filter)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Organization)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.OrganizationFilter) error); ok {
+ r1 = rf(ctx, filter)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListOrganizations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListOrganizations'
+type Store_ListOrganizations_Call struct {
+ *mock.Call
+}
+
+// ListOrganizations is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.OrganizationFilter
+func (_e *Store_Expecter) ListOrganizations(ctx interface{}, filter interface{}) *Store_ListOrganizations_Call {
+ return &Store_ListOrganizations_Call{Call: _e.mock.On("ListOrganizations", ctx, filter)}
+}
+
+func (_c *Store_ListOrganizations_Call) Run(run func(ctx context.Context, filter params.OrganizationFilter)) *Store_ListOrganizations_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.OrganizationFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListOrganizations_Call) Return(_a0 []params.Organization, _a1 error) *Store_ListOrganizations_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListOrganizations_Call) RunAndReturn(run func(context.Context, params.OrganizationFilter) ([]params.Organization, error)) *Store_ListOrganizations_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// ListPoolInstances provides a mock function with given fields: ctx, poolID
func (_m *Store) ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for ListPoolInstances")
+ }
+
var r0 []params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
@@ -1062,77 +3916,58 @@ func (_m *Store) ListPoolInstances(ctx context.Context, poolID string) ([]params
return r0, r1
}
-// ListRepoInstances provides a mock function with given fields: ctx, repoID
-func (_m *Store) ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, repoID)
-
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, repoID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, repoID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, repoID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// Store_ListPoolInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPoolInstances'
+type Store_ListPoolInstances_Call struct {
+ *mock.Call
}
-// ListRepoPools provides a mock function with given fields: ctx, repoID
-func (_m *Store) ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error) {
- ret := _m.Called(ctx, repoID)
-
- var r0 []params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Pool, error)); ok {
- return rf(ctx, repoID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Pool); ok {
- r0 = rf(ctx, repoID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Pool)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, repoID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+// ListPoolInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) ListPoolInstances(ctx interface{}, poolID interface{}) *Store_ListPoolInstances_Call {
+ return &Store_ListPoolInstances_Call{Call: _e.mock.On("ListPoolInstances", ctx, poolID)}
}
-// ListRepositories provides a mock function with given fields: ctx
-func (_m *Store) ListRepositories(ctx context.Context) ([]params.Repository, error) {
- ret := _m.Called(ctx)
+func (_c *Store_ListPoolInstances_Call) Run(run func(ctx context.Context, poolID string)) *Store_ListPoolInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_ListPoolInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListPoolInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListPoolInstances_Call) RunAndReturn(run func(context.Context, string) ([]params.Instance, error)) *Store_ListPoolInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListRepositories provides a mock function with given fields: ctx, filter
+func (_m *Store) ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
+ ret := _m.Called(ctx, filter)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRepositories")
+ }
var r0 []params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) ([]params.Repository, error)); ok {
- return rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.RepositoryFilter) ([]params.Repository, error)); ok {
+ return rf(ctx, filter)
}
- if rf, ok := ret.Get(0).(func(context.Context) []params.Repository); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, params.RepositoryFilter) []params.Repository); ok {
+ r0 = rf(ctx, filter)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]params.Repository)
}
}
- if rf, ok := ret.Get(1).(func(context.Context) error); ok {
- r1 = rf(ctx)
+ if rf, ok := ret.Get(1).(func(context.Context, params.RepositoryFilter) error); ok {
+ r1 = rf(ctx, filter)
} else {
r1 = ret.Error(1)
}
@@ -1140,10 +3975,150 @@ func (_m *Store) ListRepositories(ctx context.Context) ([]params.Repository, err
return r0, r1
}
+// Store_ListRepositories_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListRepositories'
+type Store_ListRepositories_Call struct {
+ *mock.Call
+}
+
+// ListRepositories is a helper method to define mock.On call
+// - ctx context.Context
+// - filter params.RepositoryFilter
+func (_e *Store_Expecter) ListRepositories(ctx interface{}, filter interface{}) *Store_ListRepositories_Call {
+ return &Store_ListRepositories_Call{Call: _e.mock.On("ListRepositories", ctx, filter)}
+}
+
+func (_c *Store_ListRepositories_Call) Run(run func(ctx context.Context, filter params.RepositoryFilter)) *Store_ListRepositories_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.RepositoryFilter))
+ })
+ return _c
+}
+
+func (_c *Store_ListRepositories_Call) Return(_a0 []params.Repository, _a1 error) *Store_ListRepositories_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListRepositories_Call) RunAndReturn(run func(context.Context, params.RepositoryFilter) ([]params.Repository, error)) *Store_ListRepositories_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListScaleSetInstances provides a mock function with given fields: _a0, scalesetID
+func (_m *Store) ListScaleSetInstances(_a0 context.Context, scalesetID uint) ([]params.Instance, error) {
+ ret := _m.Called(_a0, scalesetID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListScaleSetInstances")
+ }
+
+ var r0 []params.Instance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint) ([]params.Instance, error)); ok {
+ return rf(_a0, scalesetID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint) []params.Instance); ok {
+ r0 = rf(_a0, scalesetID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]params.Instance)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok {
+ r1 = rf(_a0, scalesetID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_ListScaleSetInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListScaleSetInstances'
+type Store_ListScaleSetInstances_Call struct {
+ *mock.Call
+}
+
+// ListScaleSetInstances is a helper method to define mock.On call
+// - _a0 context.Context
+// - scalesetID uint
+func (_e *Store_Expecter) ListScaleSetInstances(_a0 interface{}, scalesetID interface{}) *Store_ListScaleSetInstances_Call {
+ return &Store_ListScaleSetInstances_Call{Call: _e.mock.On("ListScaleSetInstances", _a0, scalesetID)}
+}
+
+func (_c *Store_ListScaleSetInstances_Call) Run(run func(_a0 context.Context, scalesetID uint)) *Store_ListScaleSetInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint))
+ })
+ return _c
+}
+
+func (_c *Store_ListScaleSetInstances_Call) Return(_a0 []params.Instance, _a1 error) *Store_ListScaleSetInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_ListScaleSetInstances_Call) RunAndReturn(run func(context.Context, uint) ([]params.Instance, error)) *Store_ListScaleSetInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// LockJob provides a mock function with given fields: ctx, jobID, entityID
+func (_m *Store) LockJob(ctx context.Context, jobID int64, entityID string) error {
+ ret := _m.Called(ctx, jobID, entityID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for LockJob")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok {
+ r0 = rf(ctx, jobID, entityID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_LockJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LockJob'
+type Store_LockJob_Call struct {
+ *mock.Call
+}
+
+// LockJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+// - entityID string
+func (_e *Store_Expecter) LockJob(ctx interface{}, jobID interface{}, entityID interface{}) *Store_LockJob_Call {
+ return &Store_LockJob_Call{Call: _e.mock.On("LockJob", ctx, jobID, entityID)}
+}
+
+func (_c *Store_LockJob_Call) Run(run func(ctx context.Context, jobID int64, entityID string)) *Store_LockJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_LockJob_Call) Return(_a0 error) *Store_LockJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_LockJob_Call) RunAndReturn(run func(context.Context, int64, string) error) *Store_LockJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// PoolInstanceCount provides a mock function with given fields: ctx, poolID
func (_m *Store) PoolInstanceCount(ctx context.Context, poolID string) (int64, error) {
ret := _m.Called(ctx, poolID)
+ if len(ret) == 0 {
+ panic("no return value specified for PoolInstanceCount")
+ }
+
var r0 int64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
@@ -1164,22 +4139,255 @@ func (_m *Store) PoolInstanceCount(ctx context.Context, poolID string) (int64, e
return r0, r1
}
+// Store_PoolInstanceCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PoolInstanceCount'
+type Store_PoolInstanceCount_Call struct {
+ *mock.Call
+}
+
+// PoolInstanceCount is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+func (_e *Store_Expecter) PoolInstanceCount(ctx interface{}, poolID interface{}) *Store_PoolInstanceCount_Call {
+ return &Store_PoolInstanceCount_Call{Call: _e.mock.On("PoolInstanceCount", ctx, poolID)}
+}
+
+func (_c *Store_PoolInstanceCount_Call) Run(run func(ctx context.Context, poolID string)) *Store_PoolInstanceCount_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *Store_PoolInstanceCount_Call) Return(_a0 int64, _a1 error) *Store_PoolInstanceCount_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_PoolInstanceCount_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *Store_PoolInstanceCount_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetScaleSetDesiredRunnerCount provides a mock function with given fields: ctx, scaleSetID, desiredRunnerCount
+func (_m *Store) SetScaleSetDesiredRunnerCount(ctx context.Context, scaleSetID uint, desiredRunnerCount int) error {
+ ret := _m.Called(ctx, scaleSetID, desiredRunnerCount)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetScaleSetDesiredRunnerCount")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, int) error); ok {
+ r0 = rf(ctx, scaleSetID, desiredRunnerCount)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_SetScaleSetDesiredRunnerCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetScaleSetDesiredRunnerCount'
+type Store_SetScaleSetDesiredRunnerCount_Call struct {
+ *mock.Call
+}
+
+// SetScaleSetDesiredRunnerCount is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+// - desiredRunnerCount int
+func (_e *Store_Expecter) SetScaleSetDesiredRunnerCount(ctx interface{}, scaleSetID interface{}, desiredRunnerCount interface{}) *Store_SetScaleSetDesiredRunnerCount_Call {
+ return &Store_SetScaleSetDesiredRunnerCount_Call{Call: _e.mock.On("SetScaleSetDesiredRunnerCount", ctx, scaleSetID, desiredRunnerCount)}
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) Run(run func(ctx context.Context, scaleSetID uint, desiredRunnerCount int)) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(int))
+ })
+ return _c
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) Return(_a0 error) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_SetScaleSetDesiredRunnerCount_Call) RunAndReturn(run func(context.Context, uint, int) error) *Store_SetScaleSetDesiredRunnerCount_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetScaleSetLastMessageID provides a mock function with given fields: ctx, scaleSetID, lastMessageID
+func (_m *Store) SetScaleSetLastMessageID(ctx context.Context, scaleSetID uint, lastMessageID int64) error {
+ ret := _m.Called(ctx, scaleSetID, lastMessageID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for SetScaleSetLastMessageID")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, int64) error); ok {
+ r0 = rf(ctx, scaleSetID, lastMessageID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_SetScaleSetLastMessageID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetScaleSetLastMessageID'
+type Store_SetScaleSetLastMessageID_Call struct {
+ *mock.Call
+}
+
+// SetScaleSetLastMessageID is a helper method to define mock.On call
+// - ctx context.Context
+// - scaleSetID uint
+// - lastMessageID int64
+func (_e *Store_Expecter) SetScaleSetLastMessageID(ctx interface{}, scaleSetID interface{}, lastMessageID interface{}) *Store_SetScaleSetLastMessageID_Call {
+ return &Store_SetScaleSetLastMessageID_Call{Call: _e.mock.On("SetScaleSetLastMessageID", ctx, scaleSetID, lastMessageID)}
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) Run(run func(ctx context.Context, scaleSetID uint, lastMessageID int64)) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(int64))
+ })
+ return _c
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) Return(_a0 error) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_SetScaleSetLastMessageID_Call) RunAndReturn(run func(context.Context, uint, int64) error) *Store_SetScaleSetLastMessageID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UnlockJob provides a mock function with given fields: ctx, jobID, entityID
+func (_m *Store) UnlockJob(ctx context.Context, jobID int64, entityID string) error {
+ ret := _m.Called(ctx, jobID, entityID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UnlockJob")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok {
+ r0 = rf(ctx, jobID, entityID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Store_UnlockJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnlockJob'
+type Store_UnlockJob_Call struct {
+ *mock.Call
+}
+
+// UnlockJob is a helper method to define mock.On call
+// - ctx context.Context
+// - jobID int64
+// - entityID string
+func (_e *Store_Expecter) UnlockJob(ctx interface{}, jobID interface{}, entityID interface{}) *Store_UnlockJob_Call {
+ return &Store_UnlockJob_Call{Call: _e.mock.On("UnlockJob", ctx, jobID, entityID)}
+}
+
+func (_c *Store_UnlockJob_Call) Run(run func(ctx context.Context, jobID int64, entityID string)) *Store_UnlockJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64), args[2].(string))
+ })
+ return _c
+}
+
+func (_c *Store_UnlockJob_Call) Return(_a0 error) *Store_UnlockJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Store_UnlockJob_Call) RunAndReturn(run func(context.Context, int64, string) error) *Store_UnlockJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateController provides a mock function with given fields: info
+func (_m *Store) UpdateController(info params.UpdateControllerParams) (params.ControllerInfo, error) {
+ ret := _m.Called(info)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateController")
+ }
+
+ var r0 params.ControllerInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(params.UpdateControllerParams) (params.ControllerInfo, error)); ok {
+ return rf(info)
+ }
+ if rf, ok := ret.Get(0).(func(params.UpdateControllerParams) params.ControllerInfo); ok {
+ r0 = rf(info)
+ } else {
+ r0 = ret.Get(0).(params.ControllerInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(params.UpdateControllerParams) error); ok {
+ r1 = rf(info)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateController_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateController'
+type Store_UpdateController_Call struct {
+ *mock.Call
+}
+
+// UpdateController is a helper method to define mock.On call
+// - info params.UpdateControllerParams
+func (_e *Store_Expecter) UpdateController(info interface{}) *Store_UpdateController_Call {
+ return &Store_UpdateController_Call{Call: _e.mock.On("UpdateController", info)}
+}
+
+func (_c *Store_UpdateController_Call) Run(run func(info params.UpdateControllerParams)) *Store_UpdateController_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.UpdateControllerParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateController_Call) Return(_a0 params.ControllerInfo, _a1 error) *Store_UpdateController_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateController_Call) RunAndReturn(run func(params.UpdateControllerParams) (params.ControllerInfo, error)) *Store_UpdateController_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateEnterprise provides a mock function with given fields: ctx, enterpriseID, param
-func (_m *Store) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateRepositoryParams) (params.Enterprise, error) {
+func (_m *Store) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error) {
ret := _m.Called(ctx, enterpriseID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEnterprise")
+ }
+
var r0 params.Enterprise
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) (params.Enterprise, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Enterprise, error)); ok {
return rf(ctx, enterpriseID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) params.Enterprise); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) params.Enterprise); ok {
r0 = rf(ctx, enterpriseID, param)
} else {
r0 = ret.Get(0).(params.Enterprise)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateRepositoryParams) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateEntityParams) error); ok {
r1 = rf(ctx, enterpriseID, param)
} else {
r1 = ret.Error(1)
@@ -1188,23 +4396,57 @@ func (_m *Store) UpdateEnterprise(ctx context.Context, enterpriseID string, para
return r0, r1
}
-// UpdateEnterprisePool provides a mock function with given fields: ctx, enterpriseID, poolID, param
-func (_m *Store) UpdateEnterprisePool(ctx context.Context, enterpriseID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, enterpriseID, poolID, param)
+// Store_UpdateEnterprise_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEnterprise'
+type Store_UpdateEnterprise_Call struct {
+ *mock.Call
+}
+
+// UpdateEnterprise is a helper method to define mock.On call
+// - ctx context.Context
+// - enterpriseID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateEnterprise(ctx interface{}, enterpriseID interface{}, param interface{}) *Store_UpdateEnterprise_Call {
+ return &Store_UpdateEnterprise_Call{Call: _e.mock.On("UpdateEnterprise", ctx, enterpriseID, param)}
+}
+
+func (_c *Store_UpdateEnterprise_Call) Run(run func(ctx context.Context, enterpriseID string, param params.UpdateEntityParams)) *Store_UpdateEnterprise_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEnterprise_Call) Return(_a0 params.Enterprise, _a1 error) *Store_UpdateEnterprise_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateEnterprise_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Enterprise, error)) *Store_UpdateEnterprise_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateEntityPool provides a mock function with given fields: ctx, entity, poolID, param
+func (_m *Store) UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
+ ret := _m.Called(ctx, entity, poolID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEntityPool")
+ }
var r0 params.Pool
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) (params.Pool, error)); ok {
+ return rf(ctx, entity, poolID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) params.Pool); ok {
+ r0 = rf(ctx, entity, poolID, param)
} else {
r0 = ret.Get(0).(params.Pool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, enterpriseID, poolID, param)
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) error); ok {
+ r1 = rf(ctx, entity, poolID, param)
} else {
r1 = ret.Error(1)
}
@@ -1212,23 +4454,350 @@ func (_m *Store) UpdateEnterprisePool(ctx context.Context, enterpriseID string,
return r0, r1
}
-// UpdateInstance provides a mock function with given fields: ctx, instanceID, param
-func (_m *Store) UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error) {
- ret := _m.Called(ctx, instanceID, param)
+// Store_UpdateEntityPool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEntityPool'
+type Store_UpdateEntityPool_Call struct {
+ *mock.Call
+}
+
+// UpdateEntityPool is a helper method to define mock.On call
+// - ctx context.Context
+// - entity params.ForgeEntity
+// - poolID string
+// - param params.UpdatePoolParams
+func (_e *Store_Expecter) UpdateEntityPool(ctx interface{}, entity interface{}, poolID interface{}, param interface{}) *Store_UpdateEntityPool_Call {
+ return &Store_UpdateEntityPool_Call{Call: _e.mock.On("UpdateEntityPool", ctx, entity, poolID, param)}
+}
+
+func (_c *Store_UpdateEntityPool_Call) Run(run func(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams)) *Store_UpdateEntityPool_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(string), args[3].(params.UpdatePoolParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEntityPool_Call) Return(_a0 params.Pool, _a1 error) *Store_UpdateEntityPool_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateEntityPool_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, string, params.UpdatePoolParams) (params.Pool, error)) *Store_UpdateEntityPool_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateEntityScaleSet provides a mock function with given fields: _a0, entity, scaleSetID, param, callback
+func (_m *Store) UpdateEntityScaleSet(_a0 context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error) {
+ ret := _m.Called(_a0, entity, scaleSetID, param, callback)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateEntityScaleSet")
+ }
+
+ var r0 params.ScaleSet
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error)); ok {
+ return rf(_a0, entity, scaleSetID, param, callback)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) params.ScaleSet); ok {
+ r0 = rf(_a0, entity, scaleSetID, param, callback)
+ } else {
+ r0 = ret.Get(0).(params.ScaleSet)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) error); ok {
+ r1 = rf(_a0, entity, scaleSetID, param, callback)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateEntityScaleSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEntityScaleSet'
+type Store_UpdateEntityScaleSet_Call struct {
+ *mock.Call
+}
+
+// UpdateEntityScaleSet is a helper method to define mock.On call
+// - _a0 context.Context
+// - entity params.ForgeEntity
+// - scaleSetID uint
+// - param params.UpdateScaleSetParams
+// - callback func(params.ScaleSet , params.ScaleSet) error
+func (_e *Store_Expecter) UpdateEntityScaleSet(_a0 interface{}, entity interface{}, scaleSetID interface{}, param interface{}, callback interface{}) *Store_UpdateEntityScaleSet_Call {
+ return &Store_UpdateEntityScaleSet_Call{Call: _e.mock.On("UpdateEntityScaleSet", _a0, entity, scaleSetID, param, callback)}
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) Run(run func(_a0 context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(params.ScaleSet, params.ScaleSet) error)) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.ForgeEntity), args[2].(uint), args[3].(params.UpdateScaleSetParams), args[4].(func(params.ScaleSet, params.ScaleSet) error))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) Return(updatedScaleSet params.ScaleSet, err error) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Return(updatedScaleSet, err)
+ return _c
+}
+
+func (_c *Store_UpdateEntityScaleSet_Call) RunAndReturn(run func(context.Context, params.ForgeEntity, uint, params.UpdateScaleSetParams, func(params.ScaleSet, params.ScaleSet) error) (params.ScaleSet, error)) *Store_UpdateEntityScaleSet_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGiteaCredentials provides a mock function with given fields: ctx, id, param
+func (_m *Store) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGiteaCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.UpdateGiteaCredentialsParams) error); ok {
+ r1 = rf(ctx, id, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGiteaCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGiteaCredentials'
+type Store_UpdateGiteaCredentials_Call struct {
+ *mock.Call
+}
+
+// UpdateGiteaCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - param params.UpdateGiteaCredentialsParams
+func (_e *Store_Expecter) UpdateGiteaCredentials(ctx interface{}, id interface{}, param interface{}) *Store_UpdateGiteaCredentials_Call {
+ return &Store_UpdateGiteaCredentials_Call{Call: _e.mock.On("UpdateGiteaCredentials", ctx, id, param)}
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) Run(run func(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams)) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.UpdateGiteaCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) Return(gtCreds params.ForgeCredentials, err error) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Return(gtCreds, err)
+ return _c
+}
+
+func (_c *Store_UpdateGiteaCredentials_Call) RunAndReturn(run func(context.Context, uint, params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error)) *Store_UpdateGiteaCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGiteaEndpoint provides a mock function with given fields: _a0, name, param
+func (_m *Store) UpdateGiteaEndpoint(_a0 context.Context, name string, param params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(_a0, name, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGiteaEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(_a0, name, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGiteaEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(_a0, name, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateGiteaEndpointParams) error); ok {
+ r1 = rf(_a0, name, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGiteaEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGiteaEndpoint'
+type Store_UpdateGiteaEndpoint_Call struct {
+ *mock.Call
+}
+
+// UpdateGiteaEndpoint is a helper method to define mock.On call
+// - _a0 context.Context
+// - name string
+// - param params.UpdateGiteaEndpointParams
+func (_e *Store_Expecter) UpdateGiteaEndpoint(_a0 interface{}, name interface{}, param interface{}) *Store_UpdateGiteaEndpoint_Call {
+ return &Store_UpdateGiteaEndpoint_Call{Call: _e.mock.On("UpdateGiteaEndpoint", _a0, name, param)}
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) Run(run func(_a0 context.Context, name string, param params.UpdateGiteaEndpointParams)) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateGiteaEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) Return(ghEndpoint params.ForgeEndpoint, err error) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Return(ghEndpoint, err)
+ return _c
+}
+
+func (_c *Store_UpdateGiteaEndpoint_Call) RunAndReturn(run func(context.Context, string, params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error)) *Store_UpdateGiteaEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGithubCredentials provides a mock function with given fields: ctx, id, param
+func (_m *Store) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ ret := _m.Called(ctx, id, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGithubCredentials")
+ }
+
+ var r0 params.ForgeCredentials
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)); ok {
+ return rf(ctx, id, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint, params.UpdateGithubCredentialsParams) params.ForgeCredentials); ok {
+ r0 = rf(ctx, id, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeCredentials)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint, params.UpdateGithubCredentialsParams) error); ok {
+ r1 = rf(ctx, id, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGithubCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGithubCredentials'
+type Store_UpdateGithubCredentials_Call struct {
+ *mock.Call
+}
+
+// UpdateGithubCredentials is a helper method to define mock.On call
+// - ctx context.Context
+// - id uint
+// - param params.UpdateGithubCredentialsParams
+func (_e *Store_Expecter) UpdateGithubCredentials(ctx interface{}, id interface{}, param interface{}) *Store_UpdateGithubCredentials_Call {
+ return &Store_UpdateGithubCredentials_Call{Call: _e.mock.On("UpdateGithubCredentials", ctx, id, param)}
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) Run(run func(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams)) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint), args[2].(params.UpdateGithubCredentialsParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) Return(_a0 params.ForgeCredentials, _a1 error) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateGithubCredentials_Call) RunAndReturn(run func(context.Context, uint, params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)) *Store_UpdateGithubCredentials_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateGithubEndpoint provides a mock function with given fields: ctx, name, param
+func (_m *Store) UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ ret := _m.Called(ctx, name, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateGithubEndpoint")
+ }
+
+ var r0 params.ForgeEndpoint
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)); ok {
+ return rf(ctx, name, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateGithubEndpointParams) params.ForgeEndpoint); ok {
+ r0 = rf(ctx, name, param)
+ } else {
+ r0 = ret.Get(0).(params.ForgeEndpoint)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateGithubEndpointParams) error); ok {
+ r1 = rf(ctx, name, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Store_UpdateGithubEndpoint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateGithubEndpoint'
+type Store_UpdateGithubEndpoint_Call struct {
+ *mock.Call
+}
+
+// UpdateGithubEndpoint is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - param params.UpdateGithubEndpointParams
+func (_e *Store_Expecter) UpdateGithubEndpoint(ctx interface{}, name interface{}, param interface{}) *Store_UpdateGithubEndpoint_Call {
+ return &Store_UpdateGithubEndpoint_Call{Call: _e.mock.On("UpdateGithubEndpoint", ctx, name, param)}
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) Run(run func(ctx context.Context, name string, param params.UpdateGithubEndpointParams)) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateGithubEndpointParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) Return(_a0 params.ForgeEndpoint, _a1 error) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateGithubEndpoint_Call) RunAndReturn(run func(context.Context, string, params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)) *Store_UpdateGithubEndpoint_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateInstance provides a mock function with given fields: ctx, instanceNameOrID, param
+func (_m *Store) UpdateInstance(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams) (params.Instance, error) {
+ ret := _m.Called(ctx, instanceNameOrID, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateInstance")
+ }
var r0 params.Instance
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateInstanceParams) (params.Instance, error)); ok {
- return rf(ctx, instanceID, param)
+ return rf(ctx, instanceNameOrID, param)
}
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateInstanceParams) params.Instance); ok {
- r0 = rf(ctx, instanceID, param)
+ r0 = rf(ctx, instanceNameOrID, param)
} else {
r0 = ret.Get(0).(params.Instance)
}
if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateInstanceParams) error); ok {
- r1 = rf(ctx, instanceID, param)
+ r1 = rf(ctx, instanceNameOrID, param)
} else {
r1 = ret.Error(1)
}
@@ -1236,22 +4805,56 @@ func (_m *Store) UpdateInstance(ctx context.Context, instanceID string, param pa
return r0, r1
}
+// Store_UpdateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateInstance'
+type Store_UpdateInstance_Call struct {
+ *mock.Call
+}
+
+// UpdateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instanceNameOrID string
+// - param params.UpdateInstanceParams
+func (_e *Store_Expecter) UpdateInstance(ctx interface{}, instanceNameOrID interface{}, param interface{}) *Store_UpdateInstance_Call {
+ return &Store_UpdateInstance_Call{Call: _e.mock.On("UpdateInstance", ctx, instanceNameOrID, param)}
+}
+
+func (_c *Store_UpdateInstance_Call) Run(run func(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams)) *Store_UpdateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateInstance_Call) Return(_a0 params.Instance, _a1 error) *Store_UpdateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateInstance_Call) RunAndReturn(run func(context.Context, string, params.UpdateInstanceParams) (params.Instance, error)) *Store_UpdateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// UpdateOrganization provides a mock function with given fields: ctx, orgID, param
-func (_m *Store) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateRepositoryParams) (params.Organization, error) {
+func (_m *Store) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error) {
ret := _m.Called(ctx, orgID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateOrganization")
+ }
+
var r0 params.Organization
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) (params.Organization, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Organization, error)); ok {
return rf(ctx, orgID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) params.Organization); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) params.Organization); ok {
r0 = rf(ctx, orgID, param)
} else {
r0 = ret.Get(0).(params.Organization)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateRepositoryParams) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateEntityParams) error); ok {
r1 = rf(ctx, orgID, param)
} else {
r1 = ret.Error(1)
@@ -1260,46 +4863,56 @@ func (_m *Store) UpdateOrganization(ctx context.Context, orgID string, param par
return r0, r1
}
-// UpdateOrganizationPool provides a mock function with given fields: ctx, orgID, poolID, param
-func (_m *Store) UpdateOrganizationPool(ctx context.Context, orgID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, orgID, poolID, param)
+// Store_UpdateOrganization_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateOrganization'
+type Store_UpdateOrganization_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, orgID, poolID, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, orgID, poolID, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// UpdateOrganization is a helper method to define mock.On call
+// - ctx context.Context
+// - orgID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateOrganization(ctx interface{}, orgID interface{}, param interface{}) *Store_UpdateOrganization_Call {
+ return &Store_UpdateOrganization_Call{Call: _e.mock.On("UpdateOrganization", ctx, orgID, param)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, orgID, poolID, param)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_UpdateOrganization_Call) Run(run func(ctx context.Context, orgID string, param params.UpdateEntityParams)) *Store_UpdateOrganization_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_UpdateOrganization_Call) Return(_a0 params.Organization, _a1 error) *Store_UpdateOrganization_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateOrganization_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Organization, error)) *Store_UpdateOrganization_Call {
+ _c.Call.Return(run)
+ return _c
}
// UpdateRepository provides a mock function with given fields: ctx, repoID, param
-func (_m *Store) UpdateRepository(ctx context.Context, repoID string, param params.UpdateRepositoryParams) (params.Repository, error) {
+func (_m *Store) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error) {
ret := _m.Called(ctx, repoID, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateRepository")
+ }
+
var r0 params.Repository
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) (params.Repository, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) (params.Repository, error)); ok {
return rf(ctx, repoID, param)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateRepositoryParams) params.Repository); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateEntityParams) params.Repository); ok {
r0 = rf(ctx, repoID, param)
} else {
r0 = ret.Get(0).(params.Repository)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateRepositoryParams) error); ok {
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.UpdateEntityParams) error); ok {
r1 = rf(ctx, repoID, param)
} else {
r1 = ret.Error(1)
@@ -1308,34 +4921,44 @@ func (_m *Store) UpdateRepository(ctx context.Context, repoID string, param para
return r0, r1
}
-// UpdateRepositoryPool provides a mock function with given fields: ctx, repoID, poolID, param
-func (_m *Store) UpdateRepositoryPool(ctx context.Context, repoID string, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- ret := _m.Called(ctx, repoID, poolID, param)
+// Store_UpdateRepository_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateRepository'
+type Store_UpdateRepository_Call struct {
+ *mock.Call
+}
- var r0 params.Pool
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) (params.Pool, error)); ok {
- return rf(ctx, repoID, poolID, param)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, params.UpdatePoolParams) params.Pool); ok {
- r0 = rf(ctx, repoID, poolID, param)
- } else {
- r0 = ret.Get(0).(params.Pool)
- }
+// UpdateRepository is a helper method to define mock.On call
+// - ctx context.Context
+// - repoID string
+// - param params.UpdateEntityParams
+func (_e *Store_Expecter) UpdateRepository(ctx interface{}, repoID interface{}, param interface{}) *Store_UpdateRepository_Call {
+ return &Store_UpdateRepository_Call{Call: _e.mock.On("UpdateRepository", ctx, repoID, param)}
+}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, params.UpdatePoolParams) error); ok {
- r1 = rf(ctx, repoID, poolID, param)
- } else {
- r1 = ret.Error(1)
- }
+func (_c *Store_UpdateRepository_Call) Run(run func(ctx context.Context, repoID string, param params.UpdateEntityParams)) *Store_UpdateRepository_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateEntityParams))
+ })
+ return _c
+}
- return r0, r1
+func (_c *Store_UpdateRepository_Call) Return(_a0 params.Repository, _a1 error) *Store_UpdateRepository_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateRepository_Call) RunAndReturn(run func(context.Context, string, params.UpdateEntityParams) (params.Repository, error)) *Store_UpdateRepository_Call {
+ _c.Call.Return(run)
+ return _c
}
// UpdateUser provides a mock function with given fields: ctx, user, param
func (_m *Store) UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error) {
ret := _m.Called(ctx, user, param)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateUser")
+ }
+
var r0 params.User
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, params.UpdateUserParams) (params.User, error)); ok {
@@ -1356,13 +4979,42 @@ func (_m *Store) UpdateUser(ctx context.Context, user string, param params.Updat
return r0, r1
}
-type mockConstructorTestingTNewStore interface {
- mock.TestingT
- Cleanup(func())
+// Store_UpdateUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateUser'
+type Store_UpdateUser_Call struct {
+ *mock.Call
+}
+
+// UpdateUser is a helper method to define mock.On call
+// - ctx context.Context
+// - user string
+// - param params.UpdateUserParams
+func (_e *Store_Expecter) UpdateUser(ctx interface{}, user interface{}, param interface{}) *Store_UpdateUser_Call {
+ return &Store_UpdateUser_Call{Call: _e.mock.On("UpdateUser", ctx, user, param)}
+}
+
+func (_c *Store_UpdateUser_Call) Run(run func(ctx context.Context, user string, param params.UpdateUserParams)) *Store_UpdateUser_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.UpdateUserParams))
+ })
+ return _c
+}
+
+func (_c *Store_UpdateUser_Call) Return(_a0 params.User, _a1 error) *Store_UpdateUser_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Store_UpdateUser_Call) RunAndReturn(run func(context.Context, string, params.UpdateUserParams) (params.User, error)) *Store_UpdateUser_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewStore(t mockConstructorTestingTNewStore) *Store {
+// The first argument is typically a *testing.T value.
+func NewStore(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *Store {
mock := &Store{}
mock.Mock.Test(t)
diff --git a/database/common/store.go b/database/common/store.go
new file mode 100644
index 00000000..0cf5d929
--- /dev/null
+++ b/database/common/store.go
@@ -0,0 +1,193 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/params"
+)
+
+type GithubEndpointStore interface {
+ CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error)
+ GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error)
+ ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error)
+ UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error)
+ DeleteGithubEndpoint(ctx context.Context, name string) error
+}
+
+type GithubCredentialsStore interface {
+ CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error)
+ GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error)
+ GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error)
+ ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error)
+ UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error)
+ DeleteGithubCredentials(ctx context.Context, id uint) error
+}
+
+type RepoStore interface {
+ CreateRepository(ctx context.Context, owner, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Repository, err error)
+ GetRepository(ctx context.Context, owner, name, endpointName string) (params.Repository, error)
+ GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error)
+ ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error)
+ DeleteRepository(ctx context.Context, repoID string) error
+ UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error)
+}
+
+type OrgStore interface {
+ CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (org params.Organization, err error)
+ GetOrganization(ctx context.Context, name, endpointName string) (params.Organization, error)
+ GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error)
+ ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error)
+ DeleteOrganization(ctx context.Context, orgID string) error
+ UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error)
+}
+
+type EnterpriseStore interface {
+ CreateEnterprise(ctx context.Context, name string, credentialsName params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (params.Enterprise, error)
+ GetEnterprise(ctx context.Context, name, endpointName string) (params.Enterprise, error)
+ GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error)
+ ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error)
+ DeleteEnterprise(ctx context.Context, enterpriseID string) error
+ UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error)
+}
+
+type PoolStore interface {
+ // Probably a bad idea without some kind of filter or at least pagination
+ // nolint:golangci-lint,godox
+ // TODO: add filter/pagination
+ ListAllPools(ctx context.Context) ([]params.Pool, error)
+ GetPoolByID(ctx context.Context, poolID string) (params.Pool, error)
+ DeletePoolByID(ctx context.Context, poolID string) error
+
+ ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error)
+
+ PoolInstanceCount(ctx context.Context, poolID string) (int64, error)
+ FindPoolsMatchingAllTags(ctx context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error)
+}
+
+type UserStore interface {
+ GetUser(ctx context.Context, user string) (params.User, error)
+ GetUserByID(ctx context.Context, userID string) (params.User, error)
+ GetAdminUser(ctx context.Context) (params.User, error)
+
+ CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error)
+ UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error)
+ HasAdminUser(ctx context.Context) bool
+}
+
+type InstanceStore interface {
+ CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error)
+ DeleteInstance(ctx context.Context, poolID string, instanceNameOrID string) error
+ DeleteInstanceByName(ctx context.Context, instanceName string) error
+ UpdateInstance(ctx context.Context, instanceNameOrID string, param params.UpdateInstanceParams) (params.Instance, error)
+
+ // Probably a bad idea without some kind of filter or at least pagination
+ //
+ // nolint:golangci-lint,godox
+ // TODO: add filter/pagination
+ ListAllInstances(ctx context.Context) ([]params.Instance, error)
+
+ GetInstance(ctx context.Context, instanceNameOrID string) (params.Instance, error)
+ AddInstanceEvent(ctx context.Context, instanceNameOrID string, event params.EventType, eventLevel params.EventLevel, eventMessage string) error
+}
+
+type JobsStore interface {
+ CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error)
+ ListEntityJobsByStatus(ctx context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error)
+ ListJobsByStatus(ctx context.Context, status params.JobStatus) ([]params.Job, error)
+ ListAllJobs(ctx context.Context) ([]params.Job, error)
+
+ GetJobByID(ctx context.Context, jobID int64) (params.Job, error)
+ DeleteJob(ctx context.Context, jobID int64) error
+ UnlockJob(ctx context.Context, jobID int64, entityID string) error
+ LockJob(ctx context.Context, jobID int64, entityID string) error
+ BreakLockJobIsQueued(ctx context.Context, jobID int64) error
+
+ DeleteCompletedJobs(ctx context.Context) error
+}
+
+type EntityPoolStore interface {
+ CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (params.Pool, error)
+ GetEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error)
+ DeleteEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string) error
+ UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (params.Pool, error)
+
+ ListEntityPools(ctx context.Context, entity params.ForgeEntity) ([]params.Pool, error)
+ ListEntityInstances(ctx context.Context, entity params.ForgeEntity) ([]params.Instance, error)
+}
+
+type ControllerStore interface {
+ ControllerInfo() (params.ControllerInfo, error)
+ InitController() (params.ControllerInfo, error)
+ UpdateController(info params.UpdateControllerParams) (params.ControllerInfo, error)
+}
+
+type ScaleSetsStore interface {
+ ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error)
+ CreateEntityScaleSet(_ context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (scaleSet params.ScaleSet, err error)
+ ListEntityScaleSets(_ context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error)
+ UpdateEntityScaleSet(_ context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(old, newSet params.ScaleSet) error) (updatedScaleSet params.ScaleSet, err error)
+ GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error)
+ DeleteScaleSetByID(ctx context.Context, scaleSetID uint) (err error)
+ SetScaleSetLastMessageID(ctx context.Context, scaleSetID uint, lastMessageID int64) error
+ SetScaleSetDesiredRunnerCount(ctx context.Context, scaleSetID uint, desiredRunnerCount int) error
+}
+
+type ScaleSetInstanceStore interface {
+ ListScaleSetInstances(_ context.Context, scalesetID uint) ([]params.Instance, error)
+ CreateScaleSetInstance(_ context.Context, scaleSetID uint, param params.CreateInstanceParams) (instance params.Instance, err error)
+}
+
+type GiteaEndpointStore interface {
+ CreateGiteaEndpoint(_ context.Context, param params.CreateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error)
+ ListGiteaEndpoints(_ context.Context) ([]params.ForgeEndpoint, error)
+ DeleteGiteaEndpoint(_ context.Context, name string) (err error)
+ GetGiteaEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error)
+ UpdateGiteaEndpoint(_ context.Context, name string, param params.UpdateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error)
+}
+
+type GiteaCredentialsStore interface {
+ CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error)
+ GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error)
+ GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error)
+ ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error)
+ UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error)
+ DeleteGiteaCredentials(ctx context.Context, id uint) (err error)
+}
+
+//go:generate go run github.com/vektra/mockery/v2@latest
+type Store interface {
+ RepoStore
+ OrgStore
+ EnterpriseStore
+ PoolStore
+ UserStore
+ InstanceStore
+ JobsStore
+ GithubEndpointStore
+ GithubCredentialsStore
+ ControllerStore
+ EntityPoolStore
+ ScaleSetsStore
+ ScaleSetInstanceStore
+ GiteaEndpointStore
+ GiteaCredentialsStore
+
+ ControllerInfo() (params.ControllerInfo, error)
+ InitController() (params.ControllerInfo, error)
+ GetForgeEntity(_ context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error)
+ AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error
+}
diff --git a/database/common/watcher.go b/database/common/watcher.go
new file mode 100644
index 00000000..94152094
--- /dev/null
+++ b/database/common/watcher.go
@@ -0,0 +1,69 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "context"
+
+type (
+ DatabaseEntityType string
+ OperationType string
+ PayloadFilterFunc func(ChangePayload) bool
+)
+
+const (
+ RepositoryEntityType DatabaseEntityType = "repository"
+ OrganizationEntityType DatabaseEntityType = "organization"
+ EnterpriseEntityType DatabaseEntityType = "enterprise"
+ PoolEntityType DatabaseEntityType = "pool"
+ UserEntityType DatabaseEntityType = "user"
+ InstanceEntityType DatabaseEntityType = "instance"
+ JobEntityType DatabaseEntityType = "job"
+ ControllerEntityType DatabaseEntityType = "controller"
+ GithubCredentialsEntityType DatabaseEntityType = "github_credentials" // #nosec G101
+ GiteaCredentialsEntityType DatabaseEntityType = "gitea_credentials" // #nosec G101
+ GithubEndpointEntityType DatabaseEntityType = "github_endpoint"
+ ScaleSetEntityType DatabaseEntityType = "scaleset"
+)
+
+const (
+ CreateOperation OperationType = "create"
+ UpdateOperation OperationType = "update"
+ DeleteOperation OperationType = "delete"
+)
+
+type ChangePayload struct {
+ EntityType DatabaseEntityType `json:"entity-type"`
+ Operation OperationType `json:"operation"`
+ Payload interface{} `json:"payload"`
+}
+
+type Consumer interface {
+ Watch() <-chan ChangePayload
+ IsClosed() bool
+ Close()
+ SetFilters(filters ...PayloadFilterFunc)
+}
+
+type Producer interface {
+ Notify(ChangePayload) error
+ IsClosed() bool
+ Close()
+}
+
+type Watcher interface {
+ RegisterProducer(ctx context.Context, ID string) (Producer, error)
+ RegisterConsumer(ctx context.Context, ID string, filters ...PayloadFilterFunc) (Consumer, error)
+ Close()
+}
diff --git a/database/sql/common_test.go b/database/sql/common_test.go
new file mode 100644
index 00000000..a3c62e06
--- /dev/null
+++ b/database/sql/common_test.go
@@ -0,0 +1,21 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+const (
+ wrongPassphrase = "wrong-passphrase"
+ webhookSecret = "webhook-secret"
+ falseString = "false"
+)
diff --git a/database/sql/controller.go b/database/sql/controller.go
index 283d913c..5bf60763 100644
--- a/database/sql/controller.go
+++ b/database/sql/controller.go
@@ -15,26 +15,52 @@
package sql
import (
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
+ "errors"
+ "fmt"
+ "net/url"
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
+func dbControllerToCommonController(dbInfo ControllerInfo) (params.ControllerInfo, error) {
+ url, err := url.JoinPath(dbInfo.WebhookBaseURL, dbInfo.ControllerID.String())
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error joining webhook URL: %w", err)
+ }
+
+ return params.ControllerInfo{
+ ControllerID: dbInfo.ControllerID,
+ MetadataURL: dbInfo.MetadataURL,
+ WebhookURL: dbInfo.WebhookBaseURL,
+ ControllerWebhookURL: url,
+ CallbackURL: dbInfo.CallbackURL,
+ MinimumJobAgeBackoff: dbInfo.MinimumJobAgeBackoff,
+ Version: appdefaults.GetVersion(),
+ }, nil
+}
+
func (s *sqlDatabase) ControllerInfo() (params.ControllerInfo, error) {
var info ControllerInfo
q := s.conn.Model(&ControllerInfo{}).First(&info)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return params.ControllerInfo{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", runnerErrors.ErrNotFound)
}
- return params.ControllerInfo{}, errors.Wrap(q.Error, "fetching controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", q.Error)
}
- return params.ControllerInfo{
- ControllerID: info.ControllerID,
- }, nil
+
+ paramInfo, err := dbControllerToCommonController(info)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error converting controller info: %w", err)
+ }
+
+ return paramInfo, nil
}
func (s *sqlDatabase) InitController() (params.ControllerInfo, error) {
@@ -42,21 +68,75 @@ func (s *sqlDatabase) InitController() (params.ControllerInfo, error) {
return params.ControllerInfo{}, runnerErrors.NewConflictError("controller already initialized")
}
- newID, err := uuid.NewV4()
+ newID, err := uuid.NewRandom()
if err != nil {
- return params.ControllerInfo{}, errors.Wrap(err, "generating UUID")
+ return params.ControllerInfo{}, fmt.Errorf("error generating UUID: %w", err)
}
newInfo := ControllerInfo{
- ControllerID: newID,
+ ControllerID: newID,
+ MinimumJobAgeBackoff: 30,
}
q := s.conn.Save(&newInfo)
if q.Error != nil {
- return params.ControllerInfo{}, errors.Wrap(q.Error, "saving controller info")
+ return params.ControllerInfo{}, fmt.Errorf("error saving controller info: %w", q.Error)
}
return params.ControllerInfo{
ControllerID: newInfo.ControllerID,
}, nil
}
+
+func (s *sqlDatabase) UpdateController(info params.UpdateControllerParams) (paramInfo params.ControllerInfo, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ControllerEntityType, common.UpdateOperation, paramInfo)
+ }
+ }()
+ var dbInfo ControllerInfo
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ q := tx.Model(&ControllerInfo{}).First(&dbInfo)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching controller info: %w", runnerErrors.ErrNotFound)
+ }
+ return fmt.Errorf("error fetching controller info: %w", q.Error)
+ }
+
+ if err := info.Validate(); err != nil {
+ return fmt.Errorf("error validating controller info: %w", err)
+ }
+
+ if info.MetadataURL != nil {
+ dbInfo.MetadataURL = *info.MetadataURL
+ }
+
+ if info.CallbackURL != nil {
+ dbInfo.CallbackURL = *info.CallbackURL
+ }
+
+ if info.WebhookURL != nil {
+ dbInfo.WebhookBaseURL = *info.WebhookURL
+ }
+
+ if info.MinimumJobAgeBackoff != nil {
+ dbInfo.MinimumJobAgeBackoff = *info.MinimumJobAgeBackoff
+ }
+
+ q = tx.Save(&dbInfo)
+ if q.Error != nil {
+ return fmt.Errorf("error saving controller info: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error updating controller info: %w", err)
+ }
+
+ paramInfo, err = dbControllerToCommonController(dbInfo)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error converting controller info: %w", err)
+ }
+ return paramInfo, nil
+}
diff --git a/database/sql/controller_test.go b/database/sql/controller_test.go
index 2cf9cf48..949f675f 100644
--- a/database/sql/controller_test.go
+++ b/database/sql/controller_test.go
@@ -19,11 +19,11 @@ import (
"fmt"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- garmTesting "github.com/cloudbase/garm/internal/testing"
-
"github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing" //nolint:typecheck
)
type CtrlTestSuite struct {
diff --git a/database/sql/enterprise.go b/database/sql/enterprise.go
index a26ace71..d201cd21 100644
--- a/database/sql/enterprise.go
+++ b/database/sql/enterprise.go
@@ -1,84 +1,140 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package sql
import (
"context"
+ "errors"
+ "fmt"
+ "log/slog"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
- "gorm.io/datatypes"
+ "github.com/google/uuid"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) CreateEnterprise(ctx context.Context, name, credentialsName, webhookSecret string) (params.Enterprise, error) {
+func (s *sqlDatabase) CreateEnterprise(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (paramEnt params.Enterprise, err error) {
if webhookSecret == "" {
return params.Enterprise{}, errors.New("creating enterprise: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "encoding secret")
+ if credentials.ForgeType != params.GithubEndpointType {
+ return params.Enterprise{}, fmt.Errorf("enterprises are not supported on this forge type: %w", runnerErrors.ErrBadRequest)
}
+
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error encoding secret: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.CreateOperation, paramEnt)
+ }
+ }()
newEnterprise := Enterprise{
- Name: name,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ newEnterprise.CredentialsID = &credentials.ID
+ newEnterprise.EndpointName = &credentials.Endpoint.Name
- q := s.conn.Create(&newEnterprise)
- if q.Error != nil {
- return params.Enterprise{}, errors.Wrap(q.Error, "creating enterprise")
- }
+ q := tx.Create(&newEnterprise)
+ if q.Error != nil {
+ return fmt.Errorf("error creating enterprise: %w", q.Error)
+ }
- param, err := s.sqlToCommonEnterprise(newEnterprise)
+ newEnterprise, err = s.getEnterpriseByID(ctx, tx, newEnterprise.ID.String(), "Pools", "Credentials", "Endpoint", "Credentials.Endpoint")
+ if err != nil {
+ return fmt.Errorf("error creating enterprise: %w", err)
+ }
+ return nil
+ })
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
}
- return param, nil
+ ret, err := s.GetEnterpriseByID(ctx, newEnterprise.ID.String())
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
+ }
+
+ return ret, nil
}
-func (s *sqlDatabase) GetEnterprise(ctx context.Context, name string) (params.Enterprise, error) {
- enterprise, err := s.getEnterprise(ctx, name)
+func (s *sqlDatabase) GetEnterprise(ctx context.Context, name, endpointName string) (params.Enterprise, error) {
+ enterprise, err := s.getEnterprise(ctx, name, endpointName)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
- param, err := s.sqlToCommonEnterprise(enterprise)
+ param, err := s.sqlToCommonEnterprise(enterprise, true)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
return param, nil
}
func (s *sqlDatabase) GetEnterpriseByID(ctx context.Context, enterpriseID string) (params.Enterprise, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "Events",
+ }
+ enterprise, err := s.getEnterpriseByID(ctx, s.conn, enterpriseID, preloadList...)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
- param, err := s.sqlToCommonEnterprise(enterprise)
+ param, err := s.sqlToCommonEnterprise(enterprise, true)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (s *sqlDatabase) ListEnterprises(_ context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
var enterprises []Enterprise
- q := s.conn.Find(&enterprises)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("Endpoint")
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&enterprises)
if q.Error != nil {
- return []params.Enterprise{}, errors.Wrap(q.Error, "fetching enterprises")
+ return []params.Enterprise{}, fmt.Errorf("error fetching enterprises: %w", q.Error)
}
ret := make([]params.Enterprise, len(enterprises))
for idx, val := range enterprises {
var err error
- ret[idx], err = s.sqlToCommonEnterprise(val)
+ ret[idx], err = s.sqlToCommonEnterprise(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprises")
+ return nil, fmt.Errorf("error fetching enterprises: %w", err)
}
}
@@ -86,201 +142,122 @@ func (s *sqlDatabase) ListEnterprises(ctx context.Context) ([]params.Enterprise,
}
func (s *sqlDatabase) DeleteEnterprise(ctx context.Context, enterpriseID string) error {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
+ enterprise, err := s.getEnterpriseByID(ctx, s.conn, enterpriseID, "Endpoint", "Credentials", "Credentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching enterprise")
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
+ defer func(ent Enterprise) {
+ if err == nil {
+ asParams, innerErr := s.sqlToCommonEnterprise(ent, true)
+ if innerErr == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.DeleteOperation, asParams)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "enterprise", enterpriseID)
+ }
+ }
+ }(enterprise)
+
q := s.conn.Unscoped().Delete(&enterprise)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting enterprise")
+ return fmt.Errorf("error deleting enterprise: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateRepositoryParams) (params.Enterprise, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
- }
-
- if param.CredentialsName != "" {
- enterprise.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "encoding secret")
+func (s *sqlDatabase) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (newParams params.Enterprise, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.EnterpriseEntityType, common.UpdateOperation, newParams)
+ }
+ }()
+ var enterprise Enterprise
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ enterprise, err = s.getEnterpriseByID(ctx, tx, enterpriseID)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
- enterprise.WebhookSecret = secret
- }
- q := s.conn.Save(&enterprise)
- if q.Error != nil {
- return params.Enterprise{}, errors.Wrap(q.Error, "saving enterprise")
- }
+ if enterprise.EndpointName == nil {
+ return fmt.Errorf("error enterprise has no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
- newParams, err := s.sqlToCommonEnterprise(enterprise)
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return fmt.Errorf("error credentials have no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
+
+ if *creds.EndpointName != *enterprise.EndpointName {
+ return fmt.Errorf("error endpoint mismatch: %w", runnerErrors.ErrBadRequest)
+ }
+ enterprise.CredentialsID = &creds.ID
+ }
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("error encoding secret: %w", err)
+ }
+ enterprise.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ enterprise.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&enterprise)
+ if q.Error != nil {
+ return fmt.Errorf("error saving enterprise: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "updating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+
+ enterprise, err = s.getEnterpriseByID(ctx, s.conn, enterpriseID, "Endpoint", "Credentials", "Credentials.Endpoint")
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+ newParams, err = s.sqlToCommonEnterprise(enterprise, true)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
}
return newParams, nil
}
-func (s *sqlDatabase) CreateEnterprisePool(ctx context.Context, enterpriseID string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- EnterpriseID: enterprise.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getEnterprisePoolByUniqueFields(ctx, enterpriseID, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) GetEnterprisePool(ctx context.Context, enterpriseID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up enterprise pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.EnterprisePool, enterpriseID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) FindEnterprisePoolByTags(ctx context.Context, enterpriseID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(enterpriseID, "enterprise_id", tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (s *sqlDatabase) ListEnterprisePools(ctx context.Context, enterpriseID string) ([]params.Pool, error) {
- pools, err := s.getEnterprisePools(ctx, enterpriseID, "Tags", "Enterprise")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) ListEnterpriseInstances(ctx context.Context, enterpriseID string) ([]params.Instance, error) {
- pools, err := s.getEnterprisePools(ctx, enterpriseID, "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
- }
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) getEnterprise(ctx context.Context, name string) (Enterprise, error) {
+func (s *sqlDatabase) getEnterprise(_ context.Context, name, endpointName string) (Enterprise, error) {
var enterprise Enterprise
- q := s.conn.Where("name = ? COLLATE NOCASE", name)
- q = q.First(&enterprise)
+ q := s.conn.Where("name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, endpointName).
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("Endpoint").
+ First(&enterprise)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Enterprise{}, runnerErrors.ErrNotFound
}
- return Enterprise{}, errors.Wrap(q.Error, "fetching enterprise from database")
+ return Enterprise{}, fmt.Errorf("error fetching enterprise from database: %w", q.Error)
}
return enterprise, nil
}
-func (s *sqlDatabase) getEnterpriseByID(ctx context.Context, id string, preload ...string) (Enterprise, error) {
- u, err := uuid.FromString(id)
+func (s *sqlDatabase) getEnterpriseByID(_ context.Context, tx *gorm.DB, id string, preload ...string) (Enterprise, error) {
+ u, err := uuid.Parse(id)
if err != nil {
- return Enterprise{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Enterprise{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var enterprise Enterprise
- q := s.conn
+ q := tx
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -292,51 +269,7 @@ func (s *sqlDatabase) getEnterpriseByID(ctx context.Context, id string, preload
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Enterprise{}, runnerErrors.ErrNotFound
}
- return Enterprise{}, errors.Wrap(q.Error, "fetching enterprise from database")
+ return Enterprise{}, fmt.Errorf("error fetching enterprise from database: %w", q.Error)
}
return enterprise, nil
}
-
-func (s *sqlDatabase) getEnterprisePoolByUniqueFields(ctx context.Context, enterpriseID string, provider, image, flavor string) (Pool, error) {
- enterprise, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&enterprise).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
-
-func (s *sqlDatabase) getEnterprisePools(ctx context.Context, enterpriseID string, preload ...string) ([]Pool, error) {
- _, err := s.getEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
- }
-
- q := s.conn
- if len(preload) > 0 {
- for _, item := range preload {
- q = q.Preload(item)
- }
- }
-
- var pools []Pool
- err = q.Model(&Pool{}).Where("enterprise_id = ?", enterpriseID).
- Omit("extra_specs").
- Find(&pools).Error
-
- if err != nil {
- return nil, errors.Wrap(err, "fetching pool")
- }
-
- return pools, nil
-}
diff --git a/database/sql/enterprise_test.go b/database/sql/enterprise_test.go
index bd163ed2..9192a362 100644
--- a/database/sql/enterprise_test.go
+++ b/database/sql/enterprise_test.go
@@ -22,17 +22,16 @@ import (
"sort"
"testing"
- "github.com/cloudbase/garm/params"
-
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- garmTesting "github.com/cloudbase/garm/internal/testing"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type EnterpriseTestFixtures struct {
@@ -40,7 +39,7 @@ type EnterpriseTestFixtures struct {
CreateEnterpriseParams params.CreateEnterpriseParams
CreatePoolParams params.CreatePoolParams
CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
+ UpdateRepoParams params.UpdateEntityParams
UpdatePoolParams params.UpdatePoolParams
SQLMock sqlmock.Sqlmock
}
@@ -50,6 +49,15 @@ type EnterpriseTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *EnterpriseTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ ghesCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ ghesEndpoint params.ForgeEndpoint
}
func (s *EnterpriseTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -78,17 +86,29 @@ func (s *EnterpriseTestSuite) SetupTest() {
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.ghesEndpoint = garmTesting.CreateGHESEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.ghesCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "ghes-creds", db, s.T(), s.ghesEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some enterprise objects in the database, for testing purposes
enterprises := []params.Enterprise{}
for i := 1; i <= 3; i++ {
enterprise, err := db.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
fmt.Sprintf("test-enterprise-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%d)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%d): %q", i, err))
}
enterprises = append(enterprises, enterprise)
@@ -105,7 +125,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -124,7 +144,7 @@ func (s *EnterpriseTestSuite) SetupTest() {
Enterprises: enterprises,
CreateEnterpriseParams: params.CreateEnterpriseParams{
Name: "new-test-enterprise",
- CredentialsName: "new-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "new-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -136,14 +156,14 @@ func (s *EnterpriseTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "linux",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-update-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -160,19 +180,20 @@ func (s *EnterpriseTestSuite) SetupTest() {
func (s *EnterpriseTestSuite) TestCreateEnterprise() {
// call tested function
enterprise, err := s.Store.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
// assertions
s.Require().Nil(err)
- storeEnterprise, err := s.Store.GetEnterpriseByID(context.Background(), enterprise.ID)
+ storeEnterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, enterprise.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get enterprise by id: %v", err))
}
s.Require().Equal(storeEnterprise.Name, enterprise.Name)
- s.Require().Equal(storeEnterprise.CredentialsName, enterprise.CredentialsName)
+ s.Require().Equal(storeEnterprise.Credentials.Name, enterprise.Credentials.Name)
s.Require().Equal(storeEnterprise.WebhookSecret, enterprise.WebhookSecret)
}
@@ -183,20 +204,21 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+ cfg.Passphrase = wrongPassphrase // it must have a size different than 32
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseDBCreateErr() {
@@ -207,18 +229,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateEnterprise(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateEnterpriseParams.Name,
- s.Fixtures.CreateEnterpriseParams.CredentialsName,
- s.Fixtures.CreateEnterpriseParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating enterprise: creating enterprise mock error", err.Error())
+ s.Require().Equal("error creating enterprise: error creating enterprise: creating enterprise mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestGetEnterprise() {
- enterprise, err := s.Store.GetEnterprise(context.Background(), s.Fixtures.Enterprises[0].Name)
+ enterprise, err := s.Store.GetEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Enterprises[0].Name, enterprise.Name)
@@ -226,71 +249,121 @@ func (s *EnterpriseTestSuite) TestGetEnterprise() {
}
func (s *EnterpriseTestSuite) TestGetEnterpriseCaseInsensitive() {
- enterprise, err := s.Store.GetEnterprise(context.Background(), "TeSt-eNtErPriSe-1")
+ enterprise, err := s.Store.GetEnterprise(s.adminCtx, "TeSt-eNtErPriSe-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-enterprise-1", enterprise.Name)
}
func (s *EnterpriseTestSuite) TestGetEnterpriseNotFound() {
- _, err := s.Store.GetEnterprise(context.Background(), "dummy-name")
+ _, err := s.Store.GetEnterprise(s.adminCtx, "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterpriseDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE name = ? COLLATE NOCASE AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].Name).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE (name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Enterprises[0].Name))
- _, err := s.StoreSQLMocked.GetEnterprise(context.Background(), s.Fixtures.Enterprises[0].Name)
+ _, err := s.StoreSQLMocked.GetEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].Name, s.Fixtures.Enterprises[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching enterprise: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestListEnterprises() {
- enterprises, err := s.Store.ListEnterprises(context.Background())
+ enterprises, err := s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), s.Fixtures.Enterprises, enterprises)
}
+func (s *EnterpriseTestSuite) TestListEnterprisesWithFilter() {
+ enterprise, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise",
+ s.ghesCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ enterprise2, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise",
+ s.testCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ enterprise3, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ "test-enterprise2",
+ s.testCreds,
+ "test-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprises, err := s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise, enterprise2}, enterprises)
+
+ enterprises, err = s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise",
+ Endpoint: s.ghesEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise}, enterprises)
+
+ enterprises, err = s.Store.ListEnterprises(s.adminCtx, params.EnterpriseFilter{
+ Name: "test-enterprise2",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise3}, enterprises)
+}
+
func (s *EnterpriseTestSuite) TestListEnterprisesDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE `enterprises`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListEnterprises(context.Background())
+ _, err := s.StoreSQLMocked.ListEnterprises(s.adminCtx, params.EnterpriseFilter{})
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprises: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching enterprises: fetching user from database mock error", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprise() {
- err := s.Store.DeleteEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID)
+ err := s.Store.DeleteEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ _, err = s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseInvalidEnterpriseID() {
- err := s.Store.DeleteEnterprise(context.Background(), "dummy-enterprise-id")
+ err := s.Store.DeleteEnterprise(s.adminCtx, "dummy-enterprise-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseDBDeleteErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -299,114 +372,153 @@ func (s *EnterpriseTestSuite) TestDeleteEnterpriseDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete enterprise error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID)
+ err := s.StoreSQLMocked.DeleteEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting enterprise: mocked delete enterprise error", err.Error())
+ s.Require().Equal("error deleting enterprise: mocked delete enterprise error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterprise() {
- enterprise, err := s.Store.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ enterprise, err := s.Store.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, enterprise.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, enterprise.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, enterprise.WebhookSecret)
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseInvalidEnterpriseID() {
- _, err := s.Store.UpdateEnterprise(context.Background(), "dummy-enterprise-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateEnterprise(s.adminCtx, "dummy-enterprise-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error updating enterprise: error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error updating enterprise: error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `enterprises` SET")).
WillReturnError(fmt.Errorf("saving enterprise mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving enterprise: saving enterprise mock error", err.Error())
+ s.Require().Equal("error updating enterprise: error saving enterprise: saving enterprise mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Enterprises[0].ID, s.Fixtures.Enterprises[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateEnterprise(s.adminCtx, s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error updating enterprise: error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByID() {
- enterprise, err := s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ enterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Enterprises[0].ID, enterprise.ID)
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByIDInvalidEnterpriseID() {
- _, err := s.Store.GetEnterpriseByID(context.Background(), "dummy-enterprise-id")
+ _, err := s.Store.GetEnterpriseByID(s.adminCtx, "dummy-enterprise-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching enterprise: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterpriseByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprise_events` WHERE `enterprise_events`.`enterprise_id` = ? AND `enterprise_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Enterprises[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Enterprises[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- _, err := s.StoreSQLMocked.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ _, err := s.StoreSQLMocked.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching enterprise: missing secret", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- enterprise, err := s.Store.GetEnterpriseByID(context.Background(), s.Fixtures.Enterprises[0].ID)
+ enterprise, err := s.Store.GetEnterpriseByID(s.adminCtx, s.Fixtures.Enterprises[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get enterprise by ID: %v", err))
}
@@ -419,216 +531,119 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.CreateEnterprisePool(context.Background(), "dummy-enterprise-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: parsing id: invalid request", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestCreateEnterpriseDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal(runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider"), err)
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
-
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Enterprises[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Enterprises[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -640,161 +655,163 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *EnterpriseTestSuite) TestListEnterprisePools() {
enterprisePools := []params.Pool{}
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%v", i)
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
enterprisePools = append(enterprisePools, pool)
}
- pools, err := s.Store.ListEnterprisePools(context.Background(), s.Fixtures.Enterprises[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), enterprisePools, pools)
}
func (s *EnterpriseTestSuite) TestListEnterprisePoolsInvalidEnterpriseID() {
- _, err := s.Store.ListEnterprisePools(context.Background(), "dummy-enterprise-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- enterprisePool, err := s.Store.GetEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
+ enterprisePool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(enterprisePool.ID, pool.ID)
}
func (s *EnterpriseTestSuite) TestGetEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.GetEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- err = s.Store.DeleteEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolInvalidEnterpriseID() {
- err := s.Store.DeleteEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up enterprise pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolDBDeleteErr() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Enterprises[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"enterprise_id", "id"}).AddRow(s.Fixtures.Enterprises[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and enterprise_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Enterprises[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID)
-
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *EnterpriseTestSuite) TestFindEnterprisePoolByTags() {
- enterprisePool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
- }
-
- pool, err := s.Store.FindEnterprisePoolByTags(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams.Tags)
-
- s.Require().Nil(err)
- s.Require().Equal(enterprisePool.ID, pool.ID)
- s.Require().Equal(enterprisePool.Image, pool.Image)
- s.Require().Equal(enterprisePool.Flavor, pool.Flavor)
-}
-
-func (s *EnterpriseTestSuite) TestFindEnterprisePoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindEnterprisePoolByTags(context.Background(), s.Fixtures.Enterprises[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}
func (s *EnterpriseTestSuite) TestListEnterpriseInstances() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-enterprise-%v", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListEnterpriseInstances(context.Background(), s.Fixtures.Enterprises[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByName(poolInstances, instances)
}
func (s *EnterpriseTestSuite) TestListEnterpriseInstancesInvalidEnterpriseID() {
- _, err := s.Store.ListEnterpriseInstances(context.Background(), "dummy-enterprise-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: fetching enterprise: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
- pool, err := s.Store.CreateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Enterprises[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
- pool, err = s.Store.UpdateEnterprisePool(context.Background(), s.Fixtures.Enterprises[0].ID, pool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err = s.Store.UpdateEntityPool(s.adminCtx, entity, pool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -804,10 +821,36 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolInvalidEnterpriseID() {
- _, err := s.Store.UpdateEnterprisePool(context.Background(), "dummy-enterprise-id", "dummy-pool-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-enterprise-id",
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
+}
+
+func (s *EnterpriseTestSuite) TestAddRepoEntityEvent() {
+ enterprise, err := s.Store.CreateEnterprise(
+ s.adminCtx,
+ s.Fixtures.CreateEnterpriseParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateEnterpriseParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := enterprise.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ enterprise, err = s.Store.GetEnterpriseByID(s.adminCtx, enterprise.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(enterprise.Events))
+ s.Require().Equal(params.StatusEvent, enterprise.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, enterprise.Events[0].EventLevel)
+ s.Require().Equal("this is a test", enterprise.Events[0].Message)
}
func TestEnterpriseTestSuite(t *testing.T) {
diff --git a/database/sql/gitea.go b/database/sql/gitea.go
new file mode 100644
index 00000000..a9edde09
--- /dev/null
+++ b/database/sql/gitea.go
@@ -0,0 +1,486 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+func (s *sqlDatabase) CreateGiteaEndpoint(_ context.Context, param params.CreateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.CreateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ?", param.Name).First(&endpoint).Error; err == nil {
+ return fmt.Errorf("gitea endpoint already exists: %w", runnerErrors.ErrDuplicateEntity)
+ }
+ endpoint = GithubEndpoint{
+ Name: param.Name,
+ Description: param.Description,
+ APIBaseURL: param.APIBaseURL,
+ BaseURL: param.BaseURL,
+ CACertBundle: param.CACertBundle,
+ EndpointType: params.GiteaEndpointType,
+ }
+
+ if err := tx.Create(&endpoint).Error; err != nil {
+ return fmt.Errorf("error creating gitea endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error creating gitea endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
+func (s *sqlDatabase) ListGiteaEndpoints(_ context.Context) ([]params.ForgeEndpoint, error) {
+ var endpoints []GithubEndpoint
+ err := s.conn.Where("endpoint_type = ?", params.GiteaEndpointType).Find(&endpoints).Error
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea endpoints: %w", err)
+ }
+
+ var ret []params.ForgeEndpoint
+ for _, ep := range endpoints {
+ commonEp, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return nil, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ ret = append(ret, commonEp)
+ }
+ return ret, nil
+}
+
+func (s *sqlDatabase) UpdateGiteaEndpoint(_ context.Context, name string, param params.UpdateGiteaEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.UpdateOperation, ghEndpoint)
+ }
+ }()
+ var endpoint GithubEndpoint
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return runnerErrors.NewNotFoundError("gitea endpoint %q not found", name)
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ var credsCount int64
+ if err := tx.Model(&GiteaCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ }
+ if credsCount > 0 && (param.APIBaseURL != nil || param.BaseURL != nil) {
+ return runnerErrors.NewBadRequestError("cannot update endpoint URLs with existing credentials")
+ }
+
+ if param.APIBaseURL != nil {
+ endpoint.APIBaseURL = *param.APIBaseURL
+ }
+
+ if param.BaseURL != nil {
+ endpoint.BaseURL = *param.BaseURL
+ }
+
+ if param.CACertBundle != nil {
+ endpoint.CACertBundle = param.CACertBundle
+ }
+
+ if param.Description != nil {
+ endpoint.Description = *param.Description
+ }
+
+ if err := tx.Save(&endpoint).Error; err != nil {
+ return fmt.Errorf("error updating gitea endpoint: %w", err)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error updating gitea endpoint: %w", err)
+ }
+ ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error converting gitea endpoint: %w", err)
+ }
+ return ghEndpoint, nil
+}
+
+func (s *sqlDatabase) GetGiteaEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error) {
+ var endpoint GithubEndpoint
+ err := s.conn.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return params.ForgeEndpoint{}, runnerErrors.NewNotFoundError("gitea endpoint %q not found", name)
+ }
+ return params.ForgeEndpoint{}, fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ return s.sqlToCommonGithubEndpoint(endpoint)
+}
+
+func (s *sqlDatabase) DeleteGiteaEndpoint(_ context.Context, name string) (err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GithubEndpointEntityType, common.DeleteOperation, params.ForgeEndpoint{Name: name})
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", name, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ var credsCount int64
+ if err := tx.Model(&GiteaCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ }
+
+ var repoCnt int64
+ if err := tx.Model(&Repository{}).Where("endpoint_name = ?", endpoint.Name).Count(&repoCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea repositories: %w", err)
+ }
+ }
+
+ var orgCnt int64
+ if err := tx.Model(&Organization{}).Where("endpoint_name = ?", endpoint.Name).Count(&orgCnt).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error fetching gitea organizations: %w", err)
+ }
+ }
+
+ if credsCount > 0 || repoCnt > 0 || orgCnt > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete endpoint with associated entities")
+ }
+
+ if err := tx.Unscoped().Delete(&endpoint).Error; err != nil {
+ return fmt.Errorf("error deleting gitea endpoint: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error deleting gitea endpoint: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error) {
+ userID, err := getUIDFromContext(ctx)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ if param.Endpoint == "" {
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("endpoint name is required")
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.GiteaCredentialsEntityType, common.CreateOperation, gtCreds)
+ }
+ }()
+ var creds GiteaCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var endpoint GithubEndpoint
+ if err := tx.Where("name = ? and endpoint_type = ?", param.Endpoint, params.GiteaEndpointType).First(&endpoint).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return runnerErrors.NewNotFoundError("gitea endpoint %q not found", param.Endpoint)
+ }
+ return fmt.Errorf("error fetching gitea endpoint: %w", err)
+ }
+
+ if err := tx.Where("name = ? and user_id = ?", param.Name, userID).First(&creds).Error; err == nil {
+ return fmt.Errorf("gitea credentials already exists: %w", runnerErrors.ErrDuplicateEntity)
+ }
+
+ var data []byte
+ var err error
+ switch param.AuthType {
+ case params.ForgeAuthTypePAT:
+ data, err = s.marshalAndSeal(param.PAT)
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth type %q", param.AuthType)
+ }
+ if err != nil {
+ return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+ }
+
+ creds = GiteaCredentials{
+ Name: param.Name,
+ Description: param.Description,
+ EndpointName: &endpoint.Name,
+ AuthType: param.AuthType,
+ Payload: data,
+ UserID: &userID,
+ }
+
+ if err := tx.Create(&creds).Error; err != nil {
+ return fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ // Skip making an extra query.
+ creds.Endpoint = endpoint
+
+ return nil
+ })
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error creating gitea credentials: %w", err)
+ }
+ gtCreds, err = s.sqlGiteaToCommonForgeCredentials(creds)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error converting gitea credentials: %w", err)
+ }
+ return gtCreds, nil
+}
+
+// getGiteaCredentialsByName fetches a single GiteaCredentials row by name,
+// scoped to the user recorded in ctx. Note: unlike GetGiteaCredentials (by
+// ID), this helper applies the user_id filter unconditionally, so it always
+// returns the calling user's own credentials, admin or not.
+// When detailed is true, the associated repositories and organizations (and
+// their credential associations) are eagerly loaded as well.
+func (s *sqlDatabase) getGiteaCredentialsByName(ctx context.Context, tx *gorm.DB, name string, detailed bool) (GiteaCredentials, error) {
+	var creds GiteaCredentials
+	// The endpoint is always preloaded; callers rely on creds.Endpoint.
+	q := tx.Preload("Endpoint")
+
+	if detailed {
+		q = q.
+			Preload("Repositories").
+			Preload("Organizations").
+			Preload("Repositories.GiteaCredentials").
+			Preload("Organizations.GiteaCredentials").
+			Preload("Repositories.Credentials").
+			Preload("Organizations.Credentials")
+	}
+
+	userID, err := getUIDFromContext(ctx)
+	if err != nil {
+		return GiteaCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+	}
+	q = q.Where("user_id = ?", userID)
+
+	err = q.Where("name = ?", name).First(&creds).Error
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			// Map gorm's sentinel to the project's not-found error type so
+			// callers can match on runnerErrors.ErrNotFound.
+			return GiteaCredentials{}, runnerErrors.NewNotFoundError("gitea credentials %q not found", name)
+		}
+		return GiteaCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+	}
+
+	return creds, nil
+}
+
+// GetGiteaCredentialsByName returns the calling user's gitea credentials with
+// the given name, converted to the common ForgeCredentials representation.
+func (s *sqlDatabase) GetGiteaCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+	dbCreds, lookupErr := s.getGiteaCredentialsByName(ctx, s.conn, name, detailed)
+	if lookupErr != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", lookupErr)
+	}
+
+	return s.sqlGiteaToCommonForgeCredentials(dbCreds)
+}
+
+// GetGiteaCredentials fetches gitea credentials by ID and converts them to
+// the common ForgeCredentials representation. Admin users may fetch any
+// credentials; non-admin users are restricted to rows matching their own
+// user_id. When detailed is true, associated repositories and organizations
+// (and their credential associations) are preloaded.
+func (s *sqlDatabase) GetGiteaCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+	var creds GiteaCredentials
+	q := s.conn.Preload("Endpoint")
+
+	if detailed {
+		q = q.
+			Preload("Repositories").
+			Preload("Organizations").
+			Preload("Repositories.GiteaCredentials").
+			Preload("Organizations.GiteaCredentials").
+			Preload("Repositories.Credentials").
+			Preload("Organizations.Credentials")
+	}
+
+	// Only admins skip the ownership filter; everyone else sees just their
+	// own credentials (a missing row surfaces as not-found below).
+	if !auth.IsAdmin(ctx) {
+		userID, err := getUIDFromContext(ctx)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+		}
+		q = q.Where("user_id = ?", userID)
+	}
+
+	err := q.Where("id = ?", id).First(&creds).Error
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return params.ForgeCredentials{}, runnerErrors.NewNotFoundError("gitea credentials not found")
+		}
+		return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+	}
+
+	return s.sqlGiteaToCommonForgeCredentials(creds)
+}
+
+// ListGiteaCredentials returns all gitea credentials visible to the caller.
+// Admin users see every credential in the store; non-admin users only see
+// rows matching the user ID recorded in ctx.
+func (s *sqlDatabase) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+	q := s.conn.Preload("Endpoint")
+	if !auth.IsAdmin(ctx) {
+		userID, err := getUIDFromContext(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+		}
+		q = q.Where("user_id = ?", userID)
+	}
+
+	var creds []GiteaCredentials
+	// The "Endpoint" association is already preloaded on q above; requesting
+	// it a second time on the Find call was redundant and has been dropped.
+	err := q.Find(&creds).Error
+	if err != nil {
+		return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+	}
+
+	var ret []params.ForgeCredentials
+	for _, c := range creds {
+		commonCreds, err := s.sqlGiteaToCommonForgeCredentials(c)
+		if err != nil {
+			return nil, fmt.Errorf("error converting gitea credentials: %w", err)
+		}
+		ret = append(ret, commonCreds)
+	}
+	return ret, nil
+}
+
+// UpdateGiteaCredentials updates the name, description and/or PAT payload of
+// an existing gitea credential. Non-admin users may only update credentials
+// they own; a row owned by someone else surfaces as not-found. On success a
+// watcher notification with the updated credentials is emitted via the
+// deferred sendNotify call.
+func (s *sqlDatabase) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (gtCreds params.ForgeCredentials, err error) {
+	defer func() {
+		// Only notify once the transaction committed and conversion succeeded.
+		if err == nil {
+			s.sendNotify(common.GiteaCredentialsEntityType, common.UpdateOperation, gtCreds)
+		}
+	}()
+	var creds GiteaCredentials
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		q := tx.Preload("Endpoint")
+		if !auth.IsAdmin(ctx) {
+			userID, err := getUIDFromContext(ctx)
+			if err != nil {
+				return fmt.Errorf("error updating gitea credentials: %w", err)
+			}
+			q = q.Where("user_id = ?", userID)
+		}
+
+		if err := q.Where("id = ?", id).First(&creds).Error; err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				return runnerErrors.NewNotFoundError("gitea credentials not found")
+			}
+			return fmt.Errorf("error fetching gitea credentials: %w", err)
+		}
+
+		// NOTE(review): a rename is not re-checked for uniqueness here;
+		// presumably a DB constraint catches duplicates — confirm.
+		if param.Name != nil {
+			creds.Name = *param.Name
+		}
+		if param.Description != nil {
+			creds.Description = *param.Description
+		}
+
+		// The secret payload is only re-sealed when the auth type supports it
+		// and a new PAT was actually supplied.
+		var data []byte
+		var err error
+		switch creds.AuthType {
+		case params.ForgeAuthTypePAT:
+			if param.PAT != nil {
+				data, err = s.marshalAndSeal(param.PAT)
+			}
+		default:
+			return runnerErrors.NewBadRequestError("invalid auth type %q", creds.AuthType)
+		}
+
+		if err != nil {
+			return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+		}
+		if len(data) > 0 {
+			creds.Payload = data
+		}
+
+		if err := tx.Save(&creds).Error; err != nil {
+			return fmt.Errorf("error updating gitea credentials: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error updating gitea credentials: %w", err)
+	}
+
+	gtCreds, err = s.sqlGiteaToCommonForgeCredentials(creds)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error converting gitea credentials: %w", err)
+	}
+	return gtCreds, nil
+}
+
+// DeleteGiteaCredentials removes gitea credentials by ID. Deleting a
+// non-existent (or non-visible) credential is a no-op. Credentials that are
+// still referenced by repositories or organizations cannot be deleted. On a
+// successful, non-noop delete a watcher notification is emitted.
+func (s *sqlDatabase) DeleteGiteaCredentials(ctx context.Context, id uint) (err error) {
+	var creds GiteaCredentials
+	defer func() {
+		if err == nil {
+			// No-op deletes (record not found) leave creds zero-valued; bail
+			// out before attempting a conversion of an empty struct, which
+			// previously produced a spurious error log.
+			if creds.ID == 0 || creds.Name == "" {
+				return
+			}
+			forgeCreds, innerErr := s.sqlGiteaToCommonForgeCredentials(creds)
+			if innerErr != nil {
+				slog.ErrorContext(ctx, "converting gitea credentials", "error", innerErr)
+			}
+			s.sendNotify(common.GiteaCredentialsEntityType, common.DeleteOperation, forgeCreds)
+		}
+	}()
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		// Repositories and Organizations are preloaded so we can refuse to
+		// delete credentials that are still in use.
+		q := tx.Where("id = ?", id).
+			Preload("Repositories").
+			Preload("Organizations")
+		if !auth.IsAdmin(ctx) {
+			userID, err := getUIDFromContext(ctx)
+			if err != nil {
+				return fmt.Errorf("error deleting gitea credentials: %w", err)
+			}
+			q = q.Where("user_id = ?", userID)
+		}
+
+		err := q.First(&creds).Error
+		if err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				// Nothing to delete; treat as success.
+				return nil
+			}
+			return fmt.Errorf("error fetching gitea credentials: %w", err)
+		}
+
+		if len(creds.Repositories) > 0 {
+			return runnerErrors.NewBadRequestError("cannot delete credentials with repositories")
+		}
+		if len(creds.Organizations) > 0 {
+			return runnerErrors.NewBadRequestError("cannot delete credentials with organizations")
+		}
+		// Unscoped performs a hard delete rather than a gorm soft delete.
+		if err := tx.Unscoped().Delete(&creds).Error; err != nil {
+			return fmt.Errorf("error deleting gitea credentials: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting gitea credentials: %w", err)
+	}
+	return nil
+}
diff --git a/database/sql/gitea_test.go b/database/sql/gitea_test.go
new file mode 100644
index 00000000..dff5c471
--- /dev/null
+++ b/database/sql/gitea_test.go
@@ -0,0 +1,848 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+// GiteaTestSuite exercises the gitea endpoint and credentials CRUD paths of
+// the SQL store against a per-test sqlite database.
+type GiteaTestSuite struct {
+	suite.Suite
+
+	// giteaEndpoint is the baseline endpoint created in SetupTest.
+	giteaEndpoint params.ForgeEndpoint
+	// db is a fresh store created in SetupTest for each test.
+	db common.Store
+}
+
+// SetupTest creates a fresh sqlite-backed store and a baseline gitea
+// endpoint (testEndpointName) that individual tests operate against.
+func (s *GiteaTestSuite) SetupTest() {
+	db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+	if err != nil {
+		s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+	}
+
+	s.db = db
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        testEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+	endpoint, err := s.db.CreateGiteaEndpoint(context.Background(), createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+	s.Require().Equal(testEndpointName, endpoint.Name)
+	s.giteaEndpoint = endpoint
+}
+
+// TestCreatingEndpoint verifies that a new gitea endpoint can be created and
+// that the returned value carries the requested name.
+func (s *GiteaTestSuite) TestCreatingEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        alternetTestEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+	s.Require().Equal(alternetTestEndpointName, endpoint.Name)
+}
+
+// TestCreatingDuplicateEndpointFails verifies that creating a second endpoint
+// with the same name fails with ErrDuplicateEntity.
+func (s *GiteaTestSuite) TestCreatingDuplicateEndpointFails() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        alternetTestEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	_, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	_, err = s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+}
+
+// TestGetEndpoint verifies that a freshly created endpoint can be fetched
+// back by name.
+func (s *GiteaTestSuite) TestGetEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        alternetTestEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	newEndpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	endpoint, err := s.db.GetGiteaEndpoint(ctx, createEpParams.Name)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+	s.Require().Equal(newEndpoint.Name, endpoint.Name)
+}
+
+// TestGetNonExistingEndpointFailsWithNotFoundError verifies the not-found
+// error mapping on endpoint lookup.
+func (s *GiteaTestSuite) TestGetNonExistingEndpointFailsWithNotFoundError() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.GetGiteaEndpoint(ctx, "non-existing")
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeletingNonExistingEndpointIsANoop verifies that deleting a missing
+// endpoint returns no error.
+func (s *GiteaTestSuite) TestDeletingNonExistingEndpointIsANoop() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	err := s.db.DeleteGiteaEndpoint(ctx, "non-existing")
+	s.Require().NoError(err)
+}
+
+// TestDeletingEndpoint verifies that a created endpoint can be deleted and is
+// subsequently not found.
+func (s *GiteaTestSuite) TestDeletingEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        alternetTestEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	err = s.db.DeleteGiteaEndpoint(ctx, alternetTestEndpointName)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGiteaEndpoint(ctx, alternetTestEndpointName)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestUpdateEndpoint verifies that description, URLs and CA cert bundle of an
+// endpoint can be updated.
+func (s *GiteaTestSuite) TestUpdateEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	// NOTE(review): a "deleteme" endpoint is created here but the update below
+	// targets testEndpointName (from SetupTest); presumably the extra endpoint
+	// ensures updates don't bleed into other rows — confirm intent.
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        "deleteme",
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	newDescription := "another description"
+	newAPIBaseURL := "https://updated.example.com"
+	newBaseURL := "https://updated.example.com"
+	caCertBundle, err := os.ReadFile("../../testdata/certs/srv-pub.pem")
+	s.Require().NoError(err)
+	updateEpParams := params.UpdateGiteaEndpointParams{
+		Description:  &newDescription,
+		APIBaseURL:   &newAPIBaseURL,
+		BaseURL:      &newBaseURL,
+		CACertBundle: caCertBundle,
+	}
+
+	updatedEndpoint, err := s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(updatedEndpoint)
+	s.Require().Equal(newDescription, updatedEndpoint.Description)
+	s.Require().Equal(newAPIBaseURL, updatedEndpoint.APIBaseURL)
+	s.Require().Equal(newBaseURL, updatedEndpoint.BaseURL)
+	s.Require().Equal(caCertBundle, updatedEndpoint.CACertBundle)
+}
+
+// TestUpdatingNonExistingEndpointReturnsNotFoundError verifies the not-found
+// error mapping on endpoint update.
+func (s *GiteaTestSuite) TestUpdatingNonExistingEndpointReturnsNotFoundError() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	newDescription := "test desc"
+	updateEpParams := params.UpdateGiteaEndpointParams{
+		Description: &newDescription,
+	}
+
+	_, err := s.db.UpdateGiteaEndpoint(ctx, "non-existing", updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestListEndpoints verifies listing returns both the SetupTest endpoint and
+// the one created here.
+func (s *GiteaTestSuite) TestListEndpoints() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        alternetTestEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	_, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	endpoints, err := s.db.ListGiteaEndpoints(ctx)
+	s.Require().NoError(err)
+	// One endpoint from SetupTest plus the one created above.
+	s.Require().Len(endpoints, 2)
+}
+
+// TestCreateCredentialsFailsWithUnauthorizedForAnonUser verifies that a
+// context with no authenticated user cannot create credentials.
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWithUnauthorizedForAnonUser() {
+	ctx := context.Background()
+
+	_, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrUnauthorized)
+}
+
+// TestCreateCredentialsFailsWhenEndpointNameIsEmpty verifies the bad-request
+// validation when no endpoint name is given.
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenEndpointNameIsEmpty() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().Regexp("endpoint name is required", err.Error())
+}
+
+// TestCreateCredentialsFailsWhenEndpointDoesNotExist verifies the not-found
+// error when the referenced endpoint is missing.
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenEndpointDoesNotExist() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{Endpoint: "non-existing"})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+	s.Require().Regexp("error creating gitea credentials: gitea endpoint \"non-existing\" not found", err.Error())
+}
+
+// TestCreateCredentialsFailsWhenAuthTypeIsInvalid verifies the bad-request
+// validation for an unknown auth type.
+func (s *GiteaTestSuite) TestCreateCredentialsFailsWhenAuthTypeIsInvalid() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGiteaCredentials(ctx, params.CreateGiteaCredentialsParams{Endpoint: s.giteaEndpoint.Name, AuthType: "invalid"})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().Regexp("invalid auth type", err.Error())
+}
+
+// TestCreateCredentials verifies the happy path of creating PAT-based gitea
+// credentials and that the returned value mirrors the request.
+func (s *GiteaTestSuite) TestCreateCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+	s.Require().Equal(credParams.Name, creds.Name)
+	s.Require().Equal(credParams.Description, creds.Description)
+	s.Require().Equal(credParams.Endpoint, creds.Endpoint.Name)
+	s.Require().Equal(credParams.AuthType, creds.AuthType)
+}
+
+// TestCreateCredentialsFailsOnDuplicateCredentials verifies that credential
+// names are unique per user, not globally: the same name fails for the same
+// user but succeeds for a different user.
+func (s *GiteaTestSuite) TestCreateCredentialsFailsOnDuplicateCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "testuser", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	// Creating creds with the same parameters should fail for the same user.
+	_, err = s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+
+	// Creating creds with the same parameters should work for different users.
+	_, err = s.db.CreateGiteaCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+}
+
+// TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll verifies the
+// ownership filtering in ListGiteaCredentials: admins list all credentials,
+// regular users list only their own.
+func (s *GiteaTestSuite) TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "testuser1", s.db, s.T())
+	testUser2 := garmTesting.CreateGARMTestUser(ctx, "testuser2", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+	testUser2Ctx := auth.PopulateContext(context.Background(), testUser2, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	credParams.Name = "test-creds2"
+	creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+
+	credParams.Name = "test-creds3"
+	creds3, err := s.db.CreateGiteaCredentials(testUser2Ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds3)
+
+	// Admin sees all three credentials.
+	credsList, err := s.db.ListGiteaCredentials(ctx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 3)
+
+	// Each regular user sees exactly their own credential.
+	credsList, err = s.db.ListGiteaCredentials(testUserCtx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 1)
+	s.Require().Equal("test-creds2", credsList[0].Name)
+
+	credsList, err = s.db.ListGiteaCredentials(testUser2Ctx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 1)
+	s.Require().Equal("test-creds3", credsList[0].Name)
+}
+
+// TestGetGiteaCredentialsFailsWhenCredentialsDontExist verifies the
+// not-found mapping for both by-ID and by-name lookups.
+func (s *GiteaTestSuite) TestGetGiteaCredentialsFailsWhenCredentialsDontExist() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.GetGiteaCredentials(ctx, 1, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+
+	_, err = s.db.GetGiteaCredentialsByName(ctx, "non-existing", true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestGetGithubCredentialsByNameReturnsOnlyCurrentUserCredentials verifies
+// that by-name lookups are scoped to the calling user (even for admins),
+// while by-ID lookups let admins fetch anyone's credentials.
+// NOTE(review): the name says "Github" but this exercises the Gitea store —
+// likely a copy/paste misnomer worth renaming.
+func (s *GiteaTestSuite) TestGetGithubCredentialsByNameReturnsOnlyCurrentUserCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "test-user1", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+
+	// Each user resolves the shared name to their own row.
+	creds2Get, err := s.db.GetGiteaCredentialsByName(testUserCtx, testCredsName, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+	s.Require().Equal(testCredsName, creds2Get.Name)
+	s.Require().Equal(creds2.ID, creds2Get.ID)
+
+	credsGet, err := s.db.GetGiteaCredentialsByName(ctx, testCredsName, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+	s.Require().Equal(testCredsName, credsGet.Name)
+	s.Require().Equal(creds.ID, credsGet.ID)
+
+	// Admin can get any creds by ID
+	credsGet, err = s.db.GetGiteaCredentials(ctx, creds2.ID, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+	s.Require().Equal(creds2.ID, credsGet.ID)
+
+	// Normal user cannot get other user creds by ID
+	_, err = s.db.GetGiteaCredentials(testUserCtx, creds.ID, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestGetGithubCredentials verifies that created credentials can be fetched
+// back both by name and by ID.
+// NOTE(review): the name says "Github" but this exercises the Gitea store —
+// likely a copy/paste misnomer worth renaming.
+func (s *GiteaTestSuite) TestGetGithubCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	creds2, err := s.db.GetGiteaCredentialsByName(ctx, testCredsName, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+	s.Require().Equal(creds.Name, creds2.Name)
+	s.Require().Equal(creds.ID, creds2.ID)
+
+	creds2, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+	s.Require().Equal(creds.Name, creds2.Name)
+	s.Require().Equal(creds.ID, creds2.ID)
+}
+
+// TestDeleteGiteaCredentials verifies the happy path of deleting credentials
+// and that they are not found afterwards.
+func (s *GiteaTestSuite) TestDeleteGiteaCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeleteGiteaCredentialsByNonAdminUser verifies that a non-admin delete
+// of someone else's credentials is a silent no-op (the row isn't visible to
+// them), while deleting their own credentials works.
+func (s *GiteaTestSuite) TestDeleteGiteaCredentialsByNonAdminUser() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "test-user4", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test-creds4",
+		},
+	}
+
+	// Create creds as admin
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	// Deleting non existent creds will return a nil error. For the test user
+	// the creds created by the admin should not be visible, which leads to not found
+	// which in turn returns no error.
+	err = s.db.DeleteGiteaCredentials(testUserCtx, creds.ID)
+	s.Require().NoError(err)
+
+	// Check that the creds created by the admin are still there.
+	credsGet, err := s.db.GetGiteaCredentials(ctx, creds.ID, true)
+	s.Require().NoError(err)
+	s.Require().NotNil(credsGet)
+	s.Require().Equal(creds.ID, credsGet.ID)
+
+	// Create the same creds with the test user.
+	creds2, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+
+	// Remove creds created by test user.
+	err = s.db.DeleteGiteaCredentials(testUserCtx, creds2.ID)
+	s.Require().NoError(err)
+
+	// The creds created by the test user should be gone.
+	_, err = s.db.GetGiteaCredentials(testUserCtx, creds2.ID, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt verifies that
+// credentials referenced by a repository or organization cannot be deleted,
+// and that enterprises cannot be created with gitea credentials at all.
+// NOTE(review): this largely overlaps with TestDeleteCredentialsWithOrgsOrReposFails
+// below (which omits only the enterprise check); consider consolidating.
+func (s *GiteaTestSuite) TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(repo)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+	err = s.db.DeleteRepository(ctx, repo.ID)
+	s.Require().NoError(err)
+
+	org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(org)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+	err = s.db.DeleteOrganization(ctx, org.ID)
+	s.Require().NoError(err)
+
+	// Enterprises are not supported for gitea credentials; creation fails.
+	enterprise, err := s.db.CreateEnterprise(ctx, "test-enterprise", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().Equal(params.Enterprise{}, enterprise)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestUpdateCredentials verifies that name, description and PAT of existing
+// credentials can be updated.
+func (s *GiteaTestSuite) TestUpdateCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	newDescription := "just a description"
+	newName := "new-name"
+	newToken := "new-token"
+	updateCredParams := params.UpdateGiteaCredentialsParams{
+		Description: &newDescription,
+		Name:        &newName,
+		PAT: &params.GithubPAT{
+			OAuth2Token: newToken,
+		},
+	}
+
+	updatedCreds, err := s.db.UpdateGiteaCredentials(ctx, creds.ID, updateCredParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(updatedCreds)
+	s.Require().Equal(newDescription, updatedCreds.Description)
+	s.Require().Equal(newName, updatedCreds.Name)
+}
+
+// TestUpdateCredentialsFailsForNonExistingCredentials verifies the not-found
+// mapping when updating a missing credential ID.
+func (s *GiteaTestSuite) TestUpdateCredentialsFailsForNonExistingCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	updateCredParams := params.UpdateGiteaCredentialsParams{
+		Description: nil,
+	}
+
+	_, err := s.db.UpdateGiteaCredentials(ctx, 1, updateCredParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestUpdateCredentialsFailsIfCredentialsAreOwnedByNonAdminUser verifies that
+// a non-admin user cannot update credentials they do not own (the row is
+// invisible to them, so the update fails with not-found).
+// NOTE(review): the creds here are created by the admin and updated by the
+// test user — the function name reads backwards; consider renaming.
+func (s *GiteaTestSuite) TestUpdateCredentialsFailsIfCredentialsAreOwnedByNonAdminUser() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test-creds5",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	newDescription := "new params desc"
+	updateCredParams := params.UpdateGiteaCredentialsParams{
+		Description: &newDescription,
+	}
+
+	_, err = s.db.UpdateGiteaCredentials(testUserCtx, creds.ID, updateCredParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestAdminUserCanUpdateAnyGiteaCredentials verifies that admins may update
+// credentials owned by other users.
+func (s *GiteaTestSuite) TestAdminUserCanUpdateAnyGiteaCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test-creds5",
+		},
+	}
+
+	// Created by the non-admin user; updated below by the admin.
+	creds, err := s.db.CreateGiteaCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	newDescription := "another new description"
+	updateCredParams := params.UpdateGiteaCredentialsParams{
+		Description: &newDescription,
+	}
+
+	newCreds, err := s.db.UpdateGiteaCredentials(ctx, creds.ID, updateCredParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDescription, newCreds.Description)
+}
+
+// TestDeleteCredentialsWithOrgsOrReposFails verifies that credentials in use
+// by a repository or organization cannot be deleted until those are removed.
+// NOTE(review): near-duplicate of TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt
+// minus the enterprise check; consider consolidating.
+func (s *GiteaTestSuite) TestDeleteCredentialsWithOrgsOrReposFails() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    s.giteaEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test-creds5",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(repo)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+	err = s.db.DeleteRepository(ctx, repo.ID)
+	s.Require().NoError(err)
+
+	org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(org)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+	err = s.db.DeleteOrganization(ctx, org.ID)
+	s.Require().NoError(err)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGiteaCredentials(ctx, creds.ID, true)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeleteGiteaEndpointFailsWithOrgsReposOrCredentials verifies that an
+// endpoint cannot be deleted while repositories, organizations or
+// credentials still reference it, and succeeds once they are removed.
+func (s *GiteaTestSuite) TestDeleteGiteaEndpointFailsWithOrgsReposOrCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	endpointParams := params.CreateGiteaEndpointParams{
+		Name:        "deleteme",
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	ep, err := s.db.CreateGiteaEndpoint(ctx, endpointParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(ep)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    ep.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test-creds5",
+		},
+	}
+
+	creds, err := s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(repo)
+
+	// ErrorAs requires a concrete target type; the endpoint delete wraps a
+	// BadRequestError while entities still reference it.
+	badRequest := &runnerErrors.BadRequestError{}
+	err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+	s.Require().Error(err)
+	s.Require().ErrorAs(err, &badRequest)
+
+	err = s.db.DeleteRepository(ctx, repo.ID)
+	s.Require().NoError(err)
+
+	org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotNil(org)
+
+	err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+	s.Require().Error(err)
+	s.Require().ErrorAs(err, &badRequest)
+
+	err = s.db.DeleteOrganization(ctx, org.ID)
+	s.Require().NoError(err)
+
+	err = s.db.DeleteGiteaCredentials(ctx, creds.ID)
+	s.Require().NoError(err)
+
+	err = s.db.DeleteGiteaEndpoint(ctx, ep.Name)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGiteaEndpoint(ctx, ep.Name)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestUpdateEndpointURLsFailsIfCredentialsAreAssociated verifies that
+// BaseURL/APIBaseURL of an endpoint cannot change while credentials reference
+// it, but the description still can.
+// NOTE(review): the "deleteme" endpoint created here is never used after
+// creation — the updates target testEndpointName; confirm intent.
+func (s *GiteaTestSuite) TestUpdateEndpointURLsFailsIfCredentialsAreAssociated() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        "deleteme",
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	credParams := params.CreateGiteaCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    testEndpointName,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err = s.db.CreateGiteaCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	newDescription := "new gitea description"
+	newBaseURL := "https://new-gitea.example.com"
+	newAPIBaseURL := "https://new-gotea.example.com"
+	updateEpParams := params.UpdateGiteaEndpointParams{
+		BaseURL: &newBaseURL,
+	}
+
+	_, err = s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "error updating gitea endpoint: cannot update endpoint URLs with existing credentials")
+
+	updateEpParams = params.UpdateGiteaEndpointParams{
+		APIBaseURL: &newAPIBaseURL,
+	}
+	_, err = s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "error updating gitea endpoint: cannot update endpoint URLs with existing credentials")
+
+	// A description-only update is allowed even with associated credentials.
+	updateEpParams = params.UpdateGiteaEndpointParams{
+		Description: &newDescription,
+	}
+	ret, err := s.db.UpdateGiteaEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDescription, ret.Description)
+}
+
+// TestListGiteaEndpoints verifies listing returns the SetupTest endpoint plus
+// the one created here.
+func (s *GiteaTestSuite) TestListGiteaEndpoints() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGiteaEndpointParams{
+		Name:        "deleteme",
+		Description: testEndpointDescription,
+		APIBaseURL:  testAPIBaseURL,
+		BaseURL:     testBaseURL,
+	}
+
+	_, err := s.db.CreateGiteaEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	endpoints, err := s.db.ListGiteaEndpoints(ctx)
+	s.Require().NoError(err)
+	s.Require().Len(endpoints, 2)
+}
+
+// TestGiteaTestSuite is the go test entry point for GiteaTestSuite.
+func TestGiteaTestSuite(t *testing.T) {
+	suite.Run(t, &GiteaTestSuite{})
+}
diff --git a/database/sql/github.go b/database/sql/github.go
new file mode 100644
index 00000000..626d138f
--- /dev/null
+++ b/database/sql/github.go
@@ -0,0 +1,513 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+// CreateGithubEndpoint creates a new GitHub endpoint record inside a
+// transaction. A request for an already existing name is rejected with
+// ErrDuplicateEntity. On success, the deferred sendNotify emits a create
+// event for watchers (it only fires when err is nil).
+func (s *sqlDatabase) CreateGithubEndpoint(_ context.Context, param params.CreateGithubEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubEndpointEntityType, common.CreateOperation, ghEndpoint)
+		}
+	}()
+	var endpoint GithubEndpoint
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		// A nil error here means a row with this name already exists.
+		if err := tx.Where("name = ?", param.Name).First(&endpoint).Error; err == nil {
+			return fmt.Errorf("error github endpoint already exists: %w", runnerErrors.ErrDuplicateEntity)
+		}
+		endpoint = GithubEndpoint{
+			Name:          param.Name,
+			Description:   param.Description,
+			APIBaseURL:    param.APIBaseURL,
+			BaseURL:       param.BaseURL,
+			UploadBaseURL: param.UploadBaseURL,
+			CACertBundle:  param.CACertBundle,
+			EndpointType:  params.GithubEndpointType,
+		}
+
+		if err := tx.Create(&endpoint).Error; err != nil {
+			return fmt.Errorf("error creating github endpoint: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return params.ForgeEndpoint{}, fmt.Errorf("error creating github endpoint: %w", err)
+	}
+	ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+	if err != nil {
+		return params.ForgeEndpoint{}, fmt.Errorf("error converting github endpoint: %w", err)
+	}
+	return ghEndpoint, nil
+}
+
+// ListGithubEndpoints returns every endpoint of type GithubEndpointType,
+// converted to the public API representation.
+func (s *sqlDatabase) ListGithubEndpoints(_ context.Context) ([]params.ForgeEndpoint, error) {
+	var endpoints []GithubEndpoint
+	if err := s.conn.Where("endpoint_type = ?", params.GithubEndpointType).Find(&endpoints).Error; err != nil {
+		return nil, fmt.Errorf("error fetching github endpoints: %w", err)
+	}
+
+	// Convert each DB record; keep ret nil when there are no endpoints.
+	var ret []params.ForgeEndpoint
+	for _, endpoint := range endpoints {
+		converted, err := s.sqlToCommonGithubEndpoint(endpoint)
+		if err != nil {
+			return nil, fmt.Errorf("error converting github endpoint: %w", err)
+		}
+		ret = append(ret, converted)
+	}
+	return ret, nil
+}
+
+// UpdateGithubEndpoint updates an existing GitHub endpoint identified by
+// name. URL fields (BaseURL, APIBaseURL, UploadBaseURL) may only change
+// while no credentials reference the endpoint; otherwise ErrBadRequest is
+// returned. Description and the CA cert bundle can always be updated. A
+// missing endpoint yields ErrNotFound. On success the deferred sendNotify
+// emits an update event.
+func (s *sqlDatabase) UpdateGithubEndpoint(_ context.Context, name string, param params.UpdateGithubEndpointParams) (ghEndpoint params.ForgeEndpoint, err error) {
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubEndpointEntityType, common.UpdateOperation, ghEndpoint)
+		}
+	}()
+	var endpoint GithubEndpoint
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		if err := tx.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error github endpoint not found: %w", runnerErrors.ErrNotFound)
+			}
+			return fmt.Errorf("error fetching github endpoint: %w", err)
+		}
+
+		// Changing the endpoint URLs would invalidate clients built from
+		// credentials that reference this endpoint, so refuse the update
+		// while any such credentials exist.
+		var credsCount int64
+		if err := tx.Model(&GithubCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error fetching github credentials: %w", err)
+			}
+		}
+		if credsCount > 0 && (param.APIBaseURL != nil || param.BaseURL != nil || param.UploadBaseURL != nil) {
+			return fmt.Errorf("cannot update endpoint URLs with existing credentials: %w", runnerErrors.ErrBadRequest)
+		}
+
+		// nil pointers mean "leave field unchanged".
+		if param.APIBaseURL != nil {
+			endpoint.APIBaseURL = *param.APIBaseURL
+		}
+
+		if param.BaseURL != nil {
+			endpoint.BaseURL = *param.BaseURL
+		}
+
+		if param.UploadBaseURL != nil {
+			endpoint.UploadBaseURL = *param.UploadBaseURL
+		}
+
+		if param.CACertBundle != nil {
+			endpoint.CACertBundle = param.CACertBundle
+		}
+
+		if param.Description != nil {
+			endpoint.Description = *param.Description
+		}
+
+		if err := tx.Save(&endpoint).Error; err != nil {
+			return fmt.Errorf("error updating github endpoint: %w", err)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return params.ForgeEndpoint{}, fmt.Errorf("error updating github endpoint: %w", err)
+	}
+	ghEndpoint, err = s.sqlToCommonGithubEndpoint(endpoint)
+	if err != nil {
+		return params.ForgeEndpoint{}, fmt.Errorf("error converting github endpoint: %w", err)
+	}
+	return ghEndpoint, nil
+}
+
+// GetGithubEndpoint fetches a single GitHub endpoint by name, returning
+// ErrNotFound when no such endpoint exists.
+func (s *sqlDatabase) GetGithubEndpoint(_ context.Context, name string) (params.ForgeEndpoint, error) {
+	var endpoint GithubEndpoint
+	if err := s.conn.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return params.ForgeEndpoint{}, fmt.Errorf("github endpoint not found: %w", runnerErrors.ErrNotFound)
+		}
+		return params.ForgeEndpoint{}, fmt.Errorf("error fetching github endpoint: %w", err)
+	}
+	return s.sqlToCommonGithubEndpoint(endpoint)
+}
+
+// DeleteGithubEndpoint removes the named GitHub endpoint. Deleting a
+// non-existing endpoint is a no-op. Deletion is refused with ErrBadRequest
+// while any credentials, repositories, organizations or enterprises still
+// reference the endpoint.
+//
+// NOTE(review): the deferred sendNotify fires whenever err is nil, which
+// includes the not-found no-op path — watchers receive a delete event for
+// an endpoint that was never present. Confirm this is intended.
+func (s *sqlDatabase) DeleteGithubEndpoint(_ context.Context, name string) (err error) {
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubEndpointEntityType, common.DeleteOperation, params.ForgeEndpoint{Name: name})
+		}
+	}()
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		var endpoint GithubEndpoint
+		if err := tx.Where("name = ? and endpoint_type = ?", name, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				// Nothing to delete; treat as success.
+				return nil
+			}
+			return fmt.Errorf("error fetching github endpoint: %w", err)
+		}
+
+		var credsCount int64
+		if err := tx.Model(&GithubCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error fetching github credentials: %w", err)
+			}
+		}
+
+		var repoCnt int64
+		if err := tx.Model(&Repository{}).Where("endpoint_name = ?", endpoint.Name).Count(&repoCnt).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error fetching github repositories: %w", err)
+			}
+		}
+
+		var orgCnt int64
+		if err := tx.Model(&Organization{}).Where("endpoint_name = ?", endpoint.Name).Count(&orgCnt).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error fetching github organizations: %w", err)
+			}
+		}
+
+		var entCnt int64
+		if err := tx.Model(&Enterprise{}).Where("endpoint_name = ?", endpoint.Name).Count(&entCnt).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("error fetching github enterprises: %w", err)
+			}
+		}
+
+		if credsCount > 0 || repoCnt > 0 || orgCnt > 0 || entCnt > 0 {
+			return fmt.Errorf("cannot delete endpoint with associated entities: %w", runnerErrors.ErrBadRequest)
+		}
+
+		// Unscoped makes this a hard delete rather than a soft delete.
+		if err := tx.Unscoped().Delete(&endpoint).Error; err != nil {
+			return fmt.Errorf("error deleting github endpoint: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting github endpoint: %w", err)
+	}
+	return nil
+}
+
+// CreateGithubCredentials creates a credentials record owned by the calling
+// user. The referenced endpoint must exist; credential names are unique per
+// user (the same name may be reused by different users). The secret payload
+// (PAT or App) is marshaled and sealed before being stored. On success the
+// deferred sendNotify emits a create event.
+func (s *sqlDatabase) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (ghCreds params.ForgeCredentials, err error) {
+	userID, err := getUIDFromContext(ctx)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error creating github credentials: %w", err)
+	}
+	if param.Endpoint == "" {
+		return params.ForgeCredentials{}, fmt.Errorf("endpoint name is required: %w", runnerErrors.ErrBadRequest)
+	}
+
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubCredentialsEntityType, common.CreateOperation, ghCreds)
+		}
+	}()
+	var creds GithubCredentials
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		var endpoint GithubEndpoint
+		if err := tx.Where("name = ? and endpoint_type = ?", param.Endpoint, params.GithubEndpointType).First(&endpoint).Error; err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("github endpoint not found: %w", runnerErrors.ErrNotFound)
+			}
+			return fmt.Errorf("error fetching github endpoint: %w", err)
+		}
+
+		// Uniqueness is scoped to (name, user), not name alone.
+		if err := tx.Where("name = ? and user_id = ?", param.Name, userID).First(&creds).Error; err == nil {
+			return fmt.Errorf("github credentials already exists: %w", runnerErrors.ErrDuplicateEntity)
+		}
+
+		var data []byte
+		var err error
+		switch param.AuthType {
+		case params.ForgeAuthTypePAT:
+			data, err = s.marshalAndSeal(param.PAT)
+		case params.ForgeAuthTypeApp:
+			data, err = s.marshalAndSeal(param.App)
+		default:
+			return fmt.Errorf("invalid auth type: %w", runnerErrors.ErrBadRequest)
+		}
+		if err != nil {
+			return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+		}
+
+		creds = GithubCredentials{
+			Name:         param.Name,
+			Description:  param.Description,
+			EndpointName: &endpoint.Name,
+			AuthType:     param.AuthType,
+			Payload:      data,
+			UserID:       &userID,
+		}
+
+		if err := tx.Create(&creds).Error; err != nil {
+			return fmt.Errorf("error creating github credentials: %w", err)
+		}
+		// Skip making an extra query.
+		creds.Endpoint = endpoint
+
+		return nil
+	})
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error creating github credentials: %w", err)
+	}
+	ghCreds, err = s.sqlToCommonForgeCredentials(creds)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error converting github credentials: %w", err)
+	}
+	return ghCreds, nil
+}
+
+// getGithubCredentialsByName fetches one credentials row by name, always
+// scoped to the calling user's ID. When detailed is true, the associated
+// repositories, organizations and enterprises (and their credentials) are
+// preloaded as well.
+//
+// NOTE(review): unlike GetGithubCredentials, this helper applies the
+// user_id filter unconditionally — admins cannot look up other users'
+// credentials by name. Confirm this asymmetry is intentional.
+func (s *sqlDatabase) getGithubCredentialsByName(ctx context.Context, tx *gorm.DB, name string, detailed bool) (GithubCredentials, error) {
+	var creds GithubCredentials
+	q := tx.Preload("Endpoint")
+
+	if detailed {
+		q = q.
+			Preload("Repositories").
+			Preload("Repositories.Credentials").
+			Preload("Organizations").
+			Preload("Organizations.Credentials").
+			Preload("Enterprises").
+			Preload("Enterprises.Credentials")
+	}
+
+	userID, err := getUIDFromContext(ctx)
+	if err != nil {
+		return GithubCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+	}
+	q = q.Where("user_id = ?", userID)
+
+	err = q.Where("name = ?", name).First(&creds).Error
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return GithubCredentials{}, fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+		}
+		return GithubCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+	}
+
+	return creds, nil
+}
+
+// GetGithubCredentialsByName returns the calling user's credentials with
+// the given name, converted to the public API representation.
+func (s *sqlDatabase) GetGithubCredentialsByName(ctx context.Context, name string, detailed bool) (params.ForgeCredentials, error) {
+	creds, err := s.getGithubCredentialsByName(ctx, s.conn, name, detailed)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+	}
+	return s.sqlToCommonForgeCredentials(creds)
+}
+
+// GetGithubCredentials fetches credentials by numeric ID. Admin callers may
+// fetch any credentials; non-admin callers are restricted to rows they own.
+// When detailed is true, associated entities are preloaded.
+func (s *sqlDatabase) GetGithubCredentials(ctx context.Context, id uint, detailed bool) (params.ForgeCredentials, error) {
+	var creds GithubCredentials
+	q := s.conn.Preload("Endpoint")
+
+	if detailed {
+		q = q.
+			Preload("Repositories").
+			Preload("Repositories.Credentials").
+			Preload("Organizations").
+			Preload("Organizations.Credentials").
+			Preload("Enterprises").
+			Preload("Enterprises.Credentials")
+	}
+
+	if !auth.IsAdmin(ctx) {
+		userID, err := getUIDFromContext(ctx)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+		}
+		q = q.Where("user_id = ?", userID)
+	}
+
+	err := q.Where("id = ?", id).First(&creds).Error
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return params.ForgeCredentials{}, fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+		}
+		return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+	}
+
+	return s.sqlToCommonForgeCredentials(creds)
+}
+
+// ListGithubCredentials lists GitHub credentials. Admin callers see all
+// credentials; non-admin callers only see the credentials they own. The
+// Endpoint association is preloaded for every returned row.
+func (s *sqlDatabase) ListGithubCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+	q := s.conn.Preload("Endpoint")
+	if !auth.IsAdmin(ctx) {
+		userID, err := getUIDFromContext(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("error fetching github credentials: %w", err)
+		}
+		q = q.Where("user_id = ?", userID)
+	}
+
+	var creds []GithubCredentials
+	// Endpoint is already preloaded on q above; the previous duplicate
+	// Preload("Endpoint") here was redundant and has been removed.
+	err := q.Find(&creds).Error
+	if err != nil {
+		return nil, fmt.Errorf("error fetching github credentials: %w", err)
+	}
+
+	var ret []params.ForgeCredentials
+	for _, c := range creds {
+		commonCreds, err := s.sqlToCommonForgeCredentials(c)
+		if err != nil {
+			return nil, fmt.Errorf("error converting github credentials: %w", err)
+		}
+		ret = append(ret, commonCreds)
+	}
+	return ret, nil
+}
+
+// UpdateGithubCredentials updates the name, description and/or secret of a
+// credentials row. Non-admin callers may only update rows they own. The
+// auth type itself cannot change: supplying App data for PAT credentials
+// (or vice versa) is rejected with ErrBadRequest. On success the deferred
+// sendNotify emits an update event.
+func (s *sqlDatabase) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (ghCreds params.ForgeCredentials, err error) {
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubCredentialsEntityType, common.UpdateOperation, ghCreds)
+		}
+	}()
+	var creds GithubCredentials
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		q := tx.Preload("Endpoint")
+		if !auth.IsAdmin(ctx) {
+			userID, err := getUIDFromContext(ctx)
+			if err != nil {
+				return fmt.Errorf("error updating github credentials: %w", err)
+			}
+			q = q.Where("user_id = ?", userID)
+		}
+
+		if err := q.Where("id = ?", id).First(&creds).Error; err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				return fmt.Errorf("github credentials not found: %w", runnerErrors.ErrNotFound)
+			}
+			return fmt.Errorf("error fetching github credentials: %w", err)
+		}
+
+		if param.Name != nil {
+			creds.Name = *param.Name
+		}
+		if param.Description != nil {
+			creds.Description = *param.Description
+		}
+
+		// The new secret (if any) is sealed below; data stays empty when
+		// the caller did not supply a matching secret.
+		var data []byte
+		var err error
+		switch creds.AuthType {
+		case params.ForgeAuthTypePAT:
+			if param.PAT != nil {
+				data, err = s.marshalAndSeal(param.PAT)
+			}
+
+			if param.App != nil {
+				return fmt.Errorf("cannot update app credentials for PAT: %w", runnerErrors.ErrBadRequest)
+			}
+		case params.ForgeAuthTypeApp:
+			if param.App != nil {
+				data, err = s.marshalAndSeal(param.App)
+			}
+
+			if param.PAT != nil {
+				return fmt.Errorf("cannot update PAT credentials for app: %w", runnerErrors.ErrBadRequest)
+			}
+		default:
+			// This should never happen, unless there was a bug in the DB migration code,
+			// or the DB was manually modified.
+			return fmt.Errorf("invalid auth type: %w", runnerErrors.ErrBadRequest)
+		}
+
+		if err != nil {
+			return fmt.Errorf("error marshaling and sealing credentials: %w", err)
+		}
+		if len(data) > 0 {
+			creds.Payload = data
+		}
+
+		if err := tx.Save(&creds).Error; err != nil {
+			return fmt.Errorf("error updating github credentials: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error updating github credentials: %w", err)
+	}
+
+	ghCreds, err = s.sqlToCommonForgeCredentials(creds)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error converting github credentials: %w", err)
+	}
+	return ghCreds, nil
+}
+
+// DeleteGithubCredentials removes a credentials row by ID. Non-admin
+// callers may only delete rows they own. Deleting a missing row is a
+// no-op. Deletion is refused with ErrBadRequest while any repository,
+// organization or enterprise still uses the credentials. On success the
+// deferred sendNotify emits a delete event carrying the id and name.
+func (s *sqlDatabase) DeleteGithubCredentials(ctx context.Context, id uint) (err error) {
+	var name string
+	defer func() {
+		if err == nil {
+			s.sendNotify(common.GithubCredentialsEntityType, common.DeleteOperation, params.ForgeCredentials{ID: id, Name: name})
+		}
+	}()
+	err = s.conn.Transaction(func(tx *gorm.DB) error {
+		q := tx.Where("id = ?", id).
+			Preload("Repositories").
+			Preload("Organizations").
+			Preload("Enterprises")
+		if !auth.IsAdmin(ctx) {
+			userID, err := getUIDFromContext(ctx)
+			if err != nil {
+				return fmt.Errorf("error deleting github credentials: %w", err)
+			}
+			q = q.Where("user_id = ?", userID)
+		}
+
+		var creds GithubCredentials
+		err := q.First(&creds).Error
+		if err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				// Nothing to delete; treat as success.
+				return nil
+			}
+			return fmt.Errorf("error fetching github credentials: %w", err)
+		}
+		name = creds.Name
+
+		if len(creds.Repositories) > 0 {
+			return fmt.Errorf("cannot delete credentials with repositories: %w", runnerErrors.ErrBadRequest)
+		}
+		if len(creds.Organizations) > 0 {
+			return fmt.Errorf("cannot delete credentials with organizations: %w", runnerErrors.ErrBadRequest)
+		}
+		if len(creds.Enterprises) > 0 {
+			return fmt.Errorf("cannot delete credentials with enterprises: %w", runnerErrors.ErrBadRequest)
+		}
+
+		// Unscoped makes this a hard delete rather than a soft delete.
+		if err := tx.Unscoped().Delete(&creds).Error; err != nil {
+			return fmt.Errorf("error deleting github credentials: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting github credentials: %w", err)
+	}
+	return nil
+}
diff --git a/database/sql/github_test.go b/database/sql/github_test.go
new file mode 100644
index 00000000..ae3a3954
--- /dev/null
+++ b/database/sql/github_test.go
@@ -0,0 +1,1041 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+// Shared fixtures for the GitHub (and Gitea) DB test suites.
+// NOTE(review): "alternetTestEndpointName" looks like a typo of
+// "alternateTestEndpointName"; left as-is here since it may be referenced
+// elsewhere in this file.
+const (
+	testUploadBaseURL        string = "https://uploads.example.com"
+	testBaseURL              string = "https://example.com"
+	testAPIBaseURL           string = "https://api.example.com"
+	testEndpointName         string = "test-endpoint"
+	alternetTestEndpointName string = "test-endpoint-alternate"
+	testEndpointDescription  string = "test description"
+	testCredsName            string = "test-creds"
+	testCredsDescription     string = "test creds"
+	defaultGithubEndpoint    string = "github.com"
+)
+
+// GithubTestSuite exercises the GitHub endpoint/credentials store methods
+// against a fresh database created per test in SetupTest.
+type GithubTestSuite struct {
+	suite.Suite
+
+	// db is the store under test, backed by a per-test SQLite database.
+	db common.Store
+}
+
+// SetupTest creates a fresh SQLite-backed store before each test.
+func (s *GithubTestSuite) SetupTest() {
+	db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+	if err != nil {
+		s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+	}
+	s.db = db
+}
+
+// TestDefaultEndpointGetsCreatedAutomaticallyIfNoOtherEndpointExists checks
+// that the default "github.com" endpoint exists on a fresh database.
+func (s *GithubTestSuite) TestDefaultEndpointGetsCreatedAutomaticallyIfNoOtherEndpointExists() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	endpoint, err := s.db.GetGithubEndpoint(ctx, defaultGithubEndpoint)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+}
+
+// TestDeletingDefaultEndpointWorksIfNoCredentials checks the default
+// endpoint can be deleted while nothing references it.
+func (s *GithubTestSuite) TestDeletingDefaultEndpointWorksIfNoCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	err := s.db.DeleteGithubEndpoint(ctx, defaultGithubEndpoint)
+	s.Require().NoError(err)
+}
+
+// TestCreatingEndpoint checks the happy path of endpoint creation.
+func (s *GithubTestSuite) TestCreatingEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+	s.Require().Equal(testEndpointName, endpoint.Name)
+}
+
+// TestCreatingDuplicateEndpointFails checks that a second create with the
+// same name is rejected with ErrDuplicateEntity.
+func (s *GithubTestSuite) TestCreatingDuplicateEndpointFails() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	_, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	_, err = s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+}
+
+// TestGetEndpoint checks fetching the default endpoint by name.
+func (s *GithubTestSuite) TestGetEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	endpoint, err := s.db.GetGithubEndpoint(ctx, defaultGithubEndpoint)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+	s.Require().Equal(defaultGithubEndpoint, endpoint.Name)
+}
+
+// TestGetNonExistingEndpointFailsWithNotFoundError checks the ErrNotFound
+// path of GetGithubEndpoint.
+func (s *GithubTestSuite) TestGetNonExistingEndpointFailsWithNotFoundError() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.GetGithubEndpoint(ctx, "non-existing")
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeletingNonExistingEndpointIsANoop checks deletion of a missing
+// endpoint succeeds silently.
+func (s *GithubTestSuite) TestDeletingNonExistingEndpointIsANoop() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	err := s.db.DeleteGithubEndpoint(ctx, "non-existing")
+	s.Require().NoError(err)
+}
+
+// TestDeletingEndpoint checks create-then-delete leaves the endpoint gone.
+func (s *GithubTestSuite) TestDeletingEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	err = s.db.DeleteGithubEndpoint(ctx, testEndpointName)
+	s.Require().NoError(err)
+
+	_, err = s.db.GetGithubEndpoint(ctx, testEndpointName)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestDeleteGithubEndpointFailsWhenCredentialsExist checks that deletion is
+// refused with ErrBadRequest while credentials reference the endpoint.
+func (s *GithubTestSuite) TestDeleteGithubEndpointFailsWhenCredentialsExist() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    testEndpointName,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err = s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	err = s.db.DeleteGithubEndpoint(ctx, testEndpointName)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+}
+
+// TestUpdateEndpoint checks that all updatable fields (description, URLs,
+// CA cert bundle) are persisted when no credentials block the URL change.
+func (s *GithubTestSuite) TestUpdateEndpoint() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	newDescription := "the new description"
+	newAPIBaseURL := "https://new-api.example.com"
+	newUploadBaseURL := "https://new-uploads.example.com"
+	newBaseURL := "https://new.example.com"
+	// Uses a test certificate checked into the repo.
+	caCertBundle, err := os.ReadFile("../../testdata/certs/srv-pub.pem")
+	s.Require().NoError(err)
+	updateEpParams := params.UpdateGithubEndpointParams{
+		Description:   &newDescription,
+		APIBaseURL:    &newAPIBaseURL,
+		UploadBaseURL: &newUploadBaseURL,
+		BaseURL:       &newBaseURL,
+		CACertBundle:  caCertBundle,
+	}
+
+	updatedEndpoint, err := s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(updatedEndpoint)
+	s.Require().Equal(newDescription, updatedEndpoint.Description)
+	s.Require().Equal(newAPIBaseURL, updatedEndpoint.APIBaseURL)
+	s.Require().Equal(newUploadBaseURL, updatedEndpoint.UploadBaseURL)
+	s.Require().Equal(newBaseURL, updatedEndpoint.BaseURL)
+	s.Require().Equal(caCertBundle, updatedEndpoint.CACertBundle)
+}
+
+// TestUpdateEndpointURLsFailsIfCredentialsAreAssociated checks that each
+// URL field update is rejected with ErrBadRequest while credentials exist,
+// but a description-only update still succeeds.
+func (s *GithubTestSuite) TestUpdateEndpointURLsFailsIfCredentialsAreAssociated() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    testEndpointName,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err = s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	newDescription := "new description"
+	newBaseURL := "https://new.example.com"
+	newAPIBaseURL := "https://new-api.example.com"
+	newUploadBaseURL := "https://new-uploads.example.com"
+	updateEpParams := params.UpdateGithubEndpointParams{
+		BaseURL: &newBaseURL,
+	}
+
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		UploadBaseURL: &newUploadBaseURL,
+	}
+
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		APIBaseURL: &newAPIBaseURL,
+	}
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "error updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		Description: &newDescription,
+	}
+	ret, err := s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDescription, ret.Description)
+}
+
+// TestUpdatingNonExistingEndpointReturnsNotFoundError checks the
+// ErrNotFound path of UpdateGithubEndpoint.
+func (s *GithubTestSuite) TestUpdatingNonExistingEndpointReturnsNotFoundError() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	newDescription := "test"
+	updateEpParams := params.UpdateGithubEndpointParams{
+		Description: &newDescription,
+	}
+
+	_, err := s.db.UpdateGithubEndpoint(ctx, "non-existing", updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+// TestListEndpoints checks that listing returns both the pre-existing
+// default endpoint and a newly created one.
+func (s *GithubTestSuite) TestListEndpoints() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name:          testEndpointName,
+		Description:   testEndpointDescription,
+		APIBaseURL:    testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL:       testBaseURL,
+	}
+
+	_, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+
+	endpoints, err := s.db.ListGithubEndpoints(ctx)
+	s.Require().NoError(err)
+	// One default endpoint plus the one created above.
+	s.Require().Len(endpoints, 2)
+}
+
+// TestCreateCredentialsFailsWithUnauthorizedForAnonUser checks a context
+// without an authenticated user yields ErrUnauthorized.
+func (s *GithubTestSuite) TestCreateCredentialsFailsWithUnauthorizedForAnonUser() {
+	ctx := context.Background()
+
+	_, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrUnauthorized)
+}
+
+// TestCreateCredentialsFailsWhenEndpointNameIsEmpty checks an empty
+// endpoint name yields ErrBadRequest.
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenEndpointNameIsEmpty() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().Regexp("endpoint name is required", err.Error())
+}
+
+// TestCreateCredentialsFailsWhenEndpointDoesNotExist checks a reference to
+// an unknown endpoint yields ErrNotFound.
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenEndpointDoesNotExist() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{Endpoint: "non-existing"})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+	s.Require().Regexp("endpoint not found", err.Error())
+}
+
+// TestCreateCredentialsFailsWhenAuthTypeIsInvalid checks an unknown auth
+// type yields ErrBadRequest.
+func (s *GithubTestSuite) TestCreateCredentialsFailsWhenAuthTypeIsInvalid() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	_, err := s.db.CreateGithubCredentials(ctx, params.CreateGithubCredentialsParams{Endpoint: defaultGithubEndpoint, AuthType: "invalid"})
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().Regexp("invalid auth type", err.Error())
+}
+
+// TestCreateCredentials checks the happy path of PAT credentials creation.
+func (s *GithubTestSuite) TestCreateCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    defaultGithubEndpoint,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+	s.Require().Equal(credParams.Name, creds.Name)
+	s.Require().Equal(credParams.Description, creds.Description)
+	s.Require().Equal(credParams.Endpoint, creds.Endpoint.Name)
+	s.Require().Equal(credParams.AuthType, creds.AuthType)
+}
+
+// TestCreateCredentialsFailsOnDuplicateCredentials checks that credential
+// names are unique per user: a duplicate fails for the same user but
+// succeeds for a different user.
+func (s *GithubTestSuite) TestCreateCredentialsFailsOnDuplicateCredentials() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "testuser", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    defaultGithubEndpoint,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err := s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	// Creating creds with the same parameters should fail for the same user.
+	_, err = s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrDuplicateEntity)
+
+	// Creating creds with the same parameters should work for different users.
+	_, err = s.db.CreateGithubCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+}
+
+// TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll checks the
+// user-scoping of ListGithubCredentials: admins see all three credentials,
+// each normal user sees only their own.
+func (s *GithubTestSuite) TestNormalUsersCanOnlySeeTheirOwnCredentialsAdminCanSeeAll() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+	testUser := garmTesting.CreateGARMTestUser(ctx, "testuser1", s.db, s.T())
+	testUser2 := garmTesting.CreateGARMTestUser(ctx, "testuser2", s.db, s.T())
+	testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+	testUser2Ctx := auth.PopulateContext(context.Background(), testUser2, nil)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name:        testCredsName,
+		Description: testCredsDescription,
+		Endpoint:    defaultGithubEndpoint,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds)
+
+	credParams.Name = "test-creds2"
+	creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds2)
+
+	credParams.Name = "test-creds3"
+	creds3, err := s.db.CreateGithubCredentials(testUser2Ctx, credParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(creds3)
+
+	credsList, err := s.db.ListGithubCredentials(ctx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 3)
+
+	credsList, err = s.db.ListGithubCredentials(testUserCtx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 1)
+	s.Require().Equal("test-creds2", credsList[0].Name)
+
+	credsList, err = s.db.ListGithubCredentials(testUser2Ctx)
+	s.Require().NoError(err)
+	s.Require().Len(credsList, 1)
+	s.Require().Equal("test-creds3", credsList[0].Name)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentialsFailsWhenCredentialsDontExist() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ _, err := s.db.GetGithubCredentials(ctx, 1, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+
+ _, err = s.db.GetGithubCredentialsByName(ctx, "non-existing", true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentialsByNameReturnsOnlyCurrentUserCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user1", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ creds2Get, err := s.db.GetGithubCredentialsByName(testUserCtx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(testCredsName, creds2Get.Name)
+ s.Require().Equal(creds2.ID, creds2Get.ID)
+
+ credsGet, err := s.db.GetGithubCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+ s.Require().Equal(testCredsName, credsGet.Name)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Admin can get any creds by ID
+ credsGet, err = s.db.GetGithubCredentials(ctx, creds2.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds2.ID, credsGet.ID)
+
+ // Normal user cannot get other user creds by ID
+ _, err = s.db.GetGithubCredentials(testUserCtx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestGetGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ creds2, err := s.db.GetGithubCredentialsByName(ctx, testCredsName, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+
+ creds2, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+ s.Require().Equal(creds.Name, creds2.Name)
+ s.Require().Equal(creds.ID, creds2.ID)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubCredentialsByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user4", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds4",
+ },
+ }
+
+ // Create creds as admin
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ // Deleting non existent creds will return a nil error. For the test user
+ // the creds created by the admin should not be visible, which leads to not found
+ // which in turn returns no error.
+ err = s.db.DeleteGithubCredentials(testUserCtx, creds.ID)
+ s.Require().NoError(err)
+
+ // Check that the creds created by the admin are still there.
+ credsGet, err := s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().NoError(err)
+ s.Require().NotNil(credsGet)
+ s.Require().Equal(creds.ID, credsGet.ID)
+
+ // Create the same creds with the test user.
+ creds2, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds2)
+
+ // Remove creds created by test user.
+ err = s.db.DeleteGithubCredentials(testUserCtx, creds2.ID)
+ s.Require().NoError(err)
+
+ // The creds created by the test user should be gone.
+ _, err = s.db.GetGithubCredentials(testUserCtx, creds2.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestDeleteCredentialsFailsIfReposOrgsOrEntitiesUseIt() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ enterprise, err := s.db.CreateEnterprise(ctx, "test-enterprise", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterprise)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+
+ err = s.db.DeleteEnterprise(ctx, enterprise.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubCredentials(ctx, creds.ID, true)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestUpdateCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description"
+ newName := "new-name"
+ newToken := "new-token"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ Name: &newName,
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: newToken,
+ },
+ }
+
+ updatedCreds, err := s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(updatedCreds)
+ s.Require().Equal(newDescription, updatedCreds.Description)
+ s.Require().Equal(newName, updatedCreds.Name)
+}
+
+func (s *GithubTestSuite) TestUpdateGithubCredentialsFailIfWrongCredentialTypeIsPassed() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ App: ¶ms.GithubApp{
+ AppID: 1,
+ InstallationID: 2,
+ PrivateKeyBytes: []byte("test"),
+ },
+ }
+
+ _, err = s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github credentials: cannot update app credentials for PAT: invalid request")
+
+ credParamsWithApp := params.CreateGithubCredentialsParams{
+ Name: "test-credsApp",
+ Description: "test credsApp",
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypeApp,
+ App: params.GithubApp{
+ AppID: 1,
+ InstallationID: 2,
+ PrivateKeyBytes: []byte("test"),
+ },
+ }
+
+ credsApp, err := s.db.CreateGithubCredentials(ctx, credParamsWithApp)
+ s.Require().NoError(err)
+ s.Require().NotNil(credsApp)
+
+ updateCredParams = params.UpdateGithubCredentialsParams{
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: "test",
+ },
+ }
+
+ _, err = s.db.UpdateGithubCredentials(ctx, credsApp.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+ s.Require().EqualError(err, "error updating github credentials: cannot update PAT credentials for app: invalid request")
+}
+
+func (s *GithubTestSuite) TestUpdateCredentialsFailsForNonExistingCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: nil,
+ }
+
+ _, err := s.db.UpdateGithubCredentials(ctx, 1, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestUpdateCredentialsFailsIfCredentialsAreOwnedByNonAdminUser() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description2"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ }
+
+ _, err = s.db.UpdateGithubCredentials(testUserCtx, creds.ID, updateCredParams)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func (s *GithubTestSuite) TestAdminUserCanUpdateAnyGithubCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+ testUser := garmTesting.CreateGARMTestUser(ctx, "test-user5", s.db, s.T())
+ testUserCtx := auth.PopulateContext(context.Background(), testUser, nil)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: defaultGithubEndpoint,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(testUserCtx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ newDescription := "new description2"
+ updateCredParams := params.UpdateGithubCredentialsParams{
+ Description: &newDescription,
+ }
+
+ newCreds, err := s.db.UpdateGithubCredentials(ctx, creds.ID, updateCredParams)
+ s.Require().NoError(err)
+ s.Require().Equal(newDescription, newCreds.Description)
+}
+
+func (s *GithubTestSuite) TestDeleteGithubEndpointFailsWithOrgsReposOrCredentials() {
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "deleteme",
+ Description: testEndpointDescription,
+ APIBaseURL: testAPIBaseURL,
+ BaseURL: testBaseURL,
+ }
+
+ ep, err := s.db.CreateGithubEndpoint(ctx, endpointParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(ep)
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: testCredsName,
+ Description: testCredsDescription,
+ Endpoint: ep.Name,
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-creds5",
+ },
+ }
+
+ creds, err := s.db.CreateGithubCredentials(ctx, credParams)
+ s.Require().NoError(err)
+ s.Require().NotNil(creds)
+
+ repo, err := s.db.CreateRepository(ctx, "test-owner", "test-repo", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(repo)
+
+ badRequest := &runnerErrors.BadRequestError{}
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteRepository(ctx, repo.ID)
+ s.Require().NoError(err)
+
+ org, err := s.db.CreateOrganization(ctx, "test-org", creds, "superSecret@123BlaBla", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotNil(org)
+
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorAs(err, &badRequest)
+
+ err = s.db.DeleteOrganization(ctx, org.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubCredentials(ctx, creds.ID)
+ s.Require().NoError(err)
+
+ err = s.db.DeleteGithubEndpoint(ctx, ep.Name)
+ s.Require().NoError(err)
+
+ _, err = s.db.GetGithubEndpoint(ctx, ep.Name)
+ s.Require().Error(err)
+ s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
+}
+
+func TestGithubTestSuite(t *testing.T) {
+ suite.Run(t, new(GithubTestSuite))
+}
+
+func TestCredentialsAndEndpointMigration(t *testing.T) {
+ cfg := garmTesting.GetTestSqliteDBConfig(t)
+
+ // Copy the sample DB
+ data, err := os.ReadFile("../../testdata/db/v0.1.4/garm.db")
+ if err != nil {
+ t.Fatalf("failed to read test data: %s", err)
+ }
+
+ if cfg.SQLite.DBFile == "" {
+ t.Fatalf("DB file not set")
+ }
+ if err := os.WriteFile(cfg.SQLite.DBFile, data, 0o600); err != nil {
+ t.Fatalf("failed to write test data: %s", err)
+ }
+
+ // define some credentials
+ credentials := []config.Github{
+ {
+ Name: "test-creds",
+ Description: "test creds",
+ AuthType: config.GithubAuthTypePAT,
+ PAT: config.GithubPAT{
+ OAuth2Token: "test",
+ },
+ },
+ {
+ Name: "ghes-test",
+ Description: "ghes creds",
+ APIBaseURL: testAPIBaseURL,
+ UploadBaseURL: testUploadBaseURL,
+ BaseURL: testBaseURL,
+ AuthType: config.GithubAuthTypeApp,
+ App: config.GithubApp{
+ AppID: 1,
+ InstallationID: 99,
+ PrivateKeyPath: "../../testdata/certs/srv-key.pem",
+ },
+ },
+ }
+ // Set the config credentials in the cfg. This is what happens in the main function.
+ // of GARM as well.
+ cfg.MigrateCredentials = credentials
+
+ db, err := NewSQLDatabase(context.Background(), cfg)
+ if err != nil {
+ t.Fatalf("failed to create db connection: %s", err)
+ }
+
+ // We expect that 2 endpoints will exist in the migrated DB and 2 credentials.
+ ctx := garmTesting.ImpersonateAdminContext(context.Background(), db, t)
+
+ endpoints, err := db.ListGithubEndpoints(ctx)
+ if err != nil {
+ t.Fatalf("failed to list endpoints: %s", err)
+ }
+ if len(endpoints) != 2 {
+ t.Fatalf("expected 2 endpoints, got %d", len(endpoints))
+ }
+ if endpoints[0].Name != defaultGithubEndpoint {
+ t.Fatalf("expected default endpoint to exist, got %s", endpoints[0].Name)
+ }
+ if endpoints[1].Name != "example.com" {
+ t.Fatalf("expected example.com endpoint to exist, got %s", endpoints[1].Name)
+ }
+ if endpoints[1].UploadBaseURL != testUploadBaseURL {
+ t.Fatalf("expected upload base URL to be %s, got %s", testUploadBaseURL, endpoints[1].UploadBaseURL)
+ }
+ if endpoints[1].BaseURL != testBaseURL {
+ t.Fatalf("expected base URL to be %s, got %s", testBaseURL, endpoints[1].BaseURL)
+ }
+ if endpoints[1].APIBaseURL != testAPIBaseURL {
+ t.Fatalf("expected API base URL to be %s, got %s", testAPIBaseURL, endpoints[1].APIBaseURL)
+ }
+
+ creds, err := db.ListGithubCredentials(ctx)
+ if err != nil {
+ t.Fatalf("failed to list credentials: %s", err)
+ }
+ if len(creds) != 2 {
+ t.Fatalf("expected 2 credentials, got %d", len(creds))
+ }
+ if creds[0].Name != "test-creds" {
+ t.Fatalf("expected test-creds to exist, got %s", creds[0].Name)
+ }
+ if creds[1].Name != "ghes-test" {
+ t.Fatalf("expected ghes-test to exist, got %s", creds[1].Name)
+ }
+ if creds[0].Endpoint.Name != defaultGithubEndpoint {
+ t.Fatalf("expected test-creds to be associated with default endpoint, got %s", creds[0].Endpoint.Name)
+ }
+ if creds[1].Endpoint.Name != "example.com" {
+ t.Fatalf("expected ghes-test to be associated with example.com endpoint, got %s", creds[1].Endpoint.Name)
+ }
+
+ if creds[0].AuthType != params.ForgeAuthTypePAT {
+ t.Fatalf("expected test-creds to have PAT auth type, got %s", creds[0].AuthType)
+ }
+ if creds[1].AuthType != params.ForgeAuthTypeApp {
+ t.Fatalf("expected ghes-test to have App auth type, got %s", creds[1].AuthType)
+ }
+ if len(creds[0].CredentialsPayload) == 0 {
+ t.Fatalf("expected test-creds to have credentials payload, got empty")
+ }
+
+ var pat params.GithubPAT
+ if err := json.Unmarshal(creds[0].CredentialsPayload, &pat); err != nil {
+ t.Fatalf("failed to unmarshal test-creds credentials payload: %s", err)
+ }
+ if pat.OAuth2Token != "test" {
+ t.Fatalf("expected test-creds to have PAT token test, got %s", pat.OAuth2Token)
+ }
+
+ var app params.GithubApp
+ if err := json.Unmarshal(creds[1].CredentialsPayload, &app); err != nil {
+ t.Fatalf("failed to unmarshal ghes-test credentials payload: %s", err)
+ }
+ if app.AppID != 1 {
+ t.Fatalf("expected ghes-test to have app ID 1, got %d", app.AppID)
+ }
+ if app.InstallationID != 99 {
+ t.Fatalf("expected ghes-test to have installation ID 99, got %d", app.InstallationID)
+ }
+ if app.PrivateKeyBytes == nil {
+ t.Fatalf("expected ghes-test to have private key bytes, got nil")
+ }
+
+ certBundle, err := credentials[1].App.PrivateKeyBytes()
+ if err != nil {
+ t.Fatalf("failed to read CA cert bundle: %s", err)
+ }
+
+ if !bytes.Equal(app.PrivateKeyBytes, certBundle) {
+ t.Fatalf("expected ghes-test private key to be equal to the CA cert bundle")
+ }
+}
diff --git a/database/sql/instances.go b/database/sql/instances.go
index ab83763c..5f9d018e 100644
--- a/database/sql/instances.go
+++ b/database/sql/instances.go
@@ -16,20 +16,47 @@ package sql
import (
"context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
+ "gorm.io/datatypes"
"gorm.io/gorm"
"gorm.io/gorm/clause"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) CreateInstance(ctx context.Context, poolID string, param params.CreateInstanceParams) (params.Instance, error) {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) CreateInstance(_ context.Context, poolID string, param params.CreateInstanceParams) (instance params.Instance, err error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching pool")
+ return params.Instance{}, fmt.Errorf("error fetching pool: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.InstanceEntityType, common.CreateOperation, instance)
+ }
+ }()
+
+ var labels datatypes.JSON
+ if len(param.AditionalLabels) > 0 {
+ labels, err = json.Marshal(param.AditionalLabels)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling labels: %w", err)
+ }
+ }
+
+ var secret []byte
+ if len(param.JitConfiguration) > 0 {
+ secret, err = s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
+ }
}
newInstance := Instance{
@@ -42,35 +69,22 @@ func (s *sqlDatabase) CreateInstance(ctx context.Context, poolID string, param p
CallbackURL: param.CallbackURL,
MetadataURL: param.MetadataURL,
GitHubRunnerGroup: param.GitHubRunnerGroup,
+ JitConfiguration: secret,
+ AditionalLabels: labels,
+ AgentID: param.AgentID,
}
q := s.conn.Create(&newInstance)
if q.Error != nil {
- return params.Instance{}, errors.Wrap(q.Error, "creating instance")
+ return params.Instance{}, fmt.Errorf("error creating instance: %w", q.Error)
}
- return s.sqlToParamsInstance(newInstance), nil
+ return s.sqlToParamsInstance(newInstance)
}
-func (s *sqlDatabase) getInstanceByID(ctx context.Context, instanceID string) (Instance, error) {
- u, err := uuid.FromString(instanceID)
+func (s *sqlDatabase) getPoolInstanceByName(poolID string, instanceName string) (Instance, error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return Instance{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
- }
- var instance Instance
- q := s.conn.Model(&Instance{}).
- Preload(clause.Associations).
- Where("id = ?", u).
- First(&instance)
- if q.Error != nil {
- return Instance{}, errors.Wrap(q.Error, "fetching instance")
- }
- return instance, nil
-}
-
-func (s *sqlDatabase) getPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (Instance, error) {
- pool, err := s.getPoolByID(ctx, poolID)
- if err != nil {
- return Instance{}, errors.Wrap(err, "fetching pool")
+ return Instance{}, fmt.Errorf("error fetching pool: %w", err)
}
var instance Instance
@@ -80,16 +94,25 @@ func (s *sqlDatabase) getPoolInstanceByName(ctx context.Context, poolID string,
First(&instance)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Instance{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching pool instance by name")
+ return Instance{}, fmt.Errorf("error fetching pool instance by name: %w", runnerErrors.ErrNotFound)
}
- return Instance{}, errors.Wrap(q.Error, "fetching pool instance by name")
+ return Instance{}, fmt.Errorf("error fetching pool instance by name: %w", q.Error)
}
+
+ instance.Pool = pool
return instance, nil
}
-func (s *sqlDatabase) getInstanceByName(ctx context.Context, instanceName string, preload ...string) (Instance, error) {
+func (s *sqlDatabase) getInstance(_ context.Context, instanceNameOrID string, preload ...string) (Instance, error) {
var instance Instance
+ var whereArg any = instanceNameOrID
+ whereClause := "name = ?"
+ id, err := uuid.Parse(instanceNameOrID)
+ if err == nil {
+ whereArg = id
+ whereClause = "id = ?"
+ }
q := s.conn
if len(preload) > 0 {
@@ -100,79 +123,115 @@ func (s *sqlDatabase) getInstanceByName(ctx context.Context, instanceName string
q = q.Model(&Instance{}).
Preload(clause.Associations).
- Where("name = ?", instanceName).
+ Where(whereClause, whereArg).
First(&instance)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Instance{}, errors.Wrap(runnerErrors.ErrNotFound, "fetching instance by name")
+ return Instance{}, fmt.Errorf("error fetching instance by name: %w", runnerErrors.ErrNotFound)
}
- return Instance{}, errors.Wrap(q.Error, "fetching instance by name")
+ return Instance{}, fmt.Errorf("error fetching instance by name: %w", q.Error)
}
return instance, nil
}
-func (s *sqlDatabase) GetPoolInstanceByName(ctx context.Context, poolID string, instanceName string) (params.Instance, error) {
- instance, err := s.getPoolInstanceByName(ctx, poolID, instanceName)
+func (s *sqlDatabase) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) {
+ instance, err := s.getInstance(ctx, instanceName, "StatusMessages", "Pool", "ScaleSet")
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ return params.Instance{}, fmt.Errorf("error fetching instance: %w", err)
}
- return s.sqlToParamsInstance(instance), nil
+ return s.sqlToParamsInstance(instance)
}
-func (s *sqlDatabase) GetInstanceByName(ctx context.Context, instanceName string) (params.Instance, error) {
- instance, err := s.getInstanceByName(ctx, instanceName, "StatusMessages")
+func (s *sqlDatabase) DeleteInstance(_ context.Context, poolID string, instanceName string) (err error) {
+ instance, err := s.getPoolInstanceByName(poolID, instanceName)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting instance: %w", err)
}
- return s.sqlToParamsInstance(instance), nil
-}
+ defer func() {
+ if err == nil {
+ var providerID string
+ if instance.ProviderID != nil {
+ providerID = *instance.ProviderID
+ }
+ instanceNotif := params.Instance{
+ ID: instance.ID.String(),
+ Name: instance.Name,
+ ProviderID: providerID,
+ AgentID: instance.AgentID,
+ }
+ switch {
+ case instance.PoolID != nil:
+ instanceNotif.PoolID = instance.PoolID.String()
+ case instance.ScaleSetFkID != nil:
+ instanceNotif.ScaleSetID = *instance.ScaleSetFkID
+ }
+
+ if notifyErr := s.sendNotify(common.InstanceEntityType, common.DeleteOperation, instanceNotif); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
-func (s *sqlDatabase) DeleteInstance(ctx context.Context, poolID string, instanceName string) error {
- instance, err := s.getPoolInstanceByName(ctx, poolID, instanceName)
- if err != nil {
- return errors.Wrap(err, "deleting instance")
- }
if q := s.conn.Unscoped().Delete(&instance); q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return nil
}
- return errors.Wrap(q.Error, "deleting instance")
+ return fmt.Errorf("error deleting instance: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) ListInstanceEvents(ctx context.Context, instanceID string, eventType params.EventType, eventLevel params.EventLevel) ([]params.StatusMessage, error) {
- var events []InstanceStatusUpdate
- query := s.conn.Model(&InstanceStatusUpdate{}).Where("instance_id = ?", instanceID)
- if eventLevel != "" {
- query = query.Where("event_level = ?", eventLevel)
- }
-
- if eventType != "" {
- query = query.Where("event_type = ?", eventType)
- }
-
- if result := query.Find(&events); result.Error != nil {
- return nil, errors.Wrap(result.Error, "fetching events")
- }
-
- eventParams := make([]params.StatusMessage, len(events))
- for idx, val := range events {
- eventParams[idx] = params.StatusMessage{
- Message: val.Message,
- EventType: val.EventType,
- EventLevel: val.EventLevel,
+func (s *sqlDatabase) DeleteInstanceByName(ctx context.Context, instanceName string) error {
+ instance, err := s.getInstance(ctx, instanceName, "Pool", "ScaleSet")
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ return nil
}
+ return fmt.Errorf("error deleting instance: %w", err)
}
- return eventParams, nil
+
+ defer func() {
+ if err == nil {
+ var providerID string
+ if instance.ProviderID != nil {
+ providerID = *instance.ProviderID
+ }
+ payload := params.Instance{
+ ID: instance.ID.String(),
+ Name: instance.Name,
+ ProviderID: providerID,
+ AgentID: instance.AgentID,
+ }
+ if instance.PoolID != nil {
+ payload.PoolID = instance.PoolID.String()
+ }
+ if instance.ScaleSetFkID != nil {
+ payload.ScaleSetID = *instance.ScaleSetFkID
+ }
+ if notifyErr := s.sendNotify(common.InstanceEntityType, common.DeleteOperation, payload); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
+
+ if q := s.conn.Unscoped().Delete(&instance); q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting instance: %w", q.Error)
+ }
+ return nil
}
-func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceID string, event params.EventType, eventLevel params.EventLevel, statusMessage string) error {
- instance, err := s.getInstanceByID(ctx, instanceID)
+func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceName string, event params.EventType, eventLevel params.EventLevel, statusMessage string) error {
+ instance, err := s.getInstance(ctx, instanceName)
if err != nil {
- return errors.Wrap(err, "updating instance")
+ return fmt.Errorf("error updating instance: %w", err)
}
msg := InstanceStatusUpdate{
@@ -182,15 +241,15 @@ func (s *sqlDatabase) AddInstanceEvent(ctx context.Context, instanceID string, e
}
if err := s.conn.Model(&instance).Association("StatusMessages").Append(&msg); err != nil {
- return errors.Wrap(err, "adding status message")
+ return fmt.Errorf("error adding status message: %w", err)
}
return nil
}
-func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, param params.UpdateInstanceParams) (params.Instance, error) {
- instance, err := s.getInstanceByID(ctx, instanceID)
+func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceName string, param params.UpdateInstanceParams) (params.Instance, error) {
+ instance, err := s.getInstance(ctx, instanceName, "Pool", "ScaleSet")
if err != nil {
- return params.Instance{}, errors.Wrap(err, "updating instance")
+ return params.Instance{}, fmt.Errorf("error updating instance: %w", err)
}
if param.AgentID != 0 {
@@ -224,11 +283,19 @@ func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, par
instance.TokenFetched = *param.TokenFetched
}
+ if param.JitConfiguration != nil {
+ secret, err := s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
+ }
+ instance.JitConfiguration = secret
+ }
+
instance.ProviderFault = param.ProviderFault
q := s.conn.Save(&instance)
if q.Error != nil {
- return params.Instance{}, errors.Wrap(q.Error, "updating instance")
+ return params.Instance{}, fmt.Errorf("error updating instance: %w", q.Error)
}
if len(param.Addresses) > 0 {
@@ -240,57 +307,75 @@ func (s *sqlDatabase) UpdateInstance(ctx context.Context, instanceID string, par
})
}
if err := s.conn.Model(&instance).Association("Addresses").Replace(addrs); err != nil {
- return params.Instance{}, errors.Wrap(err, "updating addresses")
+ return params.Instance{}, fmt.Errorf("error updating addresses: %w", err)
}
}
-
- return s.sqlToParamsInstance(instance), nil
+ inst, err := s.sqlToParamsInstance(instance)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error converting instance: %w", err)
+ }
+ s.sendNotify(common.InstanceEntityType, common.UpdateOperation, inst)
+ return inst, nil
}
-func (s *sqlDatabase) ListPoolInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
- u, err := uuid.FromString(poolID)
+func (s *sqlDatabase) ListPoolInstances(_ context.Context, poolID string) ([]params.Instance, error) {
+ u, err := uuid.Parse(poolID)
if err != nil {
- return nil, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var instances []Instance
- query := s.conn.Model(&Instance{}).Where("pool_id = ?", u)
+ query := s.conn.
+ Preload("Pool").
+ Preload("Job").
+ Where("pool_id = ?", u)
if err := query.Find(&instances); err.Error != nil {
- return nil, errors.Wrap(err.Error, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", err.Error)
}
ret := make([]params.Instance, len(instances))
for idx, inst := range instances {
- ret[idx] = s.sqlToParamsInstance(inst)
+ ret[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) ListAllInstances(ctx context.Context) ([]params.Instance, error) {
+func (s *sqlDatabase) ListAllInstances(_ context.Context) ([]params.Instance, error) {
var instances []Instance
- q := s.conn.Model(&Instance{}).Find(&instances)
+ q := s.conn.
+ Preload("Pool").
+ Preload("ScaleSet").
+ Preload("Job").
+ Find(&instances)
if q.Error != nil {
- return nil, errors.Wrap(q.Error, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", q.Error)
}
ret := make([]params.Instance, len(instances))
+ var err error
for idx, instance := range instances {
- ret[idx] = s.sqlToParamsInstance(instance)
+ ret[idx], err = s.sqlToParamsInstance(instance)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) PoolInstanceCount(ctx context.Context, poolID string) (int64, error) {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) PoolInstanceCount(_ context.Context, poolID string) (int64, error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return 0, errors.Wrap(err, "fetching pool")
+ return 0, fmt.Errorf("error fetching pool: %w", err)
}
var cnt int64
q := s.conn.Model(&Instance{}).Where("pool_id = ?", pool.ID).Count(&cnt)
if q.Error != nil {
- return 0, errors.Wrap(q.Error, "fetching instance count")
+ return 0, fmt.Errorf("error fetching instance count: %w", q.Error)
}
return cnt, nil
}
diff --git a/database/sql/instances_test.go b/database/sql/instances_test.go
index 0c18e5be..5ec55107 100644
--- a/database/sql/instances_test.go
+++ b/database/sql/instances_test.go
@@ -22,17 +22,16 @@ import (
"sort"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/providers/common"
-
- "gopkg.in/DATA-DOG/go-sqlmock.v1"
-
"github.com/stretchr/testify/suite"
+ "gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type InstancesTestFixtures struct {
@@ -49,6 +48,7 @@ type InstancesTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *InstancesTestFixtures
+ adminCtx context.Context
}
func (s *InstancesTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -77,8 +77,14 @@ func (s *InstancesTestSuite) SetupTest() {
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
+
// create an organization for testing purposes
- org, err := s.Store.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := s.Store.CreateOrganization(s.adminCtx, "test-org", creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
@@ -91,9 +97,11 @@ func (s *InstancesTestSuite) SetupTest() {
Image: "test-image",
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64", "linux"},
}
- pool, err := s.Store.CreateOrganizationPool(context.Background(), org.ID, createPoolParams)
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, createPoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org pool: %s", err))
}
@@ -102,15 +110,21 @@ func (s *InstancesTestSuite) SetupTest() {
instances := []params.Instance{}
for i := 1; i <= 3; i++ {
instance, err := db.CreateInstance(
- context.Background(),
+ s.adminCtx,
pool.ID,
params.CreateInstanceParams{
Name: fmt.Sprintf("test-instance-%d", i),
OSType: "linux",
OSArch: "amd64",
CallbackURL: "https://garm.example.com/",
- Status: common.InstanceRunning,
- RunnerStatus: common.RunnerIdle,
+ Status: commonParams.InstanceRunning,
+ RunnerStatus: params.RunnerIdle,
+ JitConfiguration: map[string]string{
+ "secret": fmt.Sprintf("secret-%d", i),
+ },
+ AditionalLabels: []string{
+ fmt.Sprintf("label-%d", i),
+ },
},
)
if err != nil {
@@ -130,7 +144,7 @@ func (s *InstancesTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -156,18 +170,18 @@ func (s *InstancesTestSuite) SetupTest() {
ProviderID: "update-provider-test",
OSName: "ubuntu",
OSVersion: "focal",
- Status: common.InstancePendingDelete,
- RunnerStatus: common.RunnerActive,
+ Status: commonParams.InstancePendingDelete,
+ RunnerStatus: params.RunnerActive,
AgentID: 4,
CreateAttempt: 3,
- Addresses: []params.Address{
+ Addresses: []commonParams.Address{
{
Address: "12.10.12.10",
- Type: params.PublicAddress,
+ Type: commonParams.PublicAddress,
},
{
Address: "10.1.1.2",
- Type: params.PrivateAddress,
+ Type: commonParams.PrivateAddress,
},
},
},
@@ -178,11 +192,11 @@ func (s *InstancesTestSuite) SetupTest() {
func (s *InstancesTestSuite) TestCreateInstance() {
// call tested function
- instance, err := s.Store.CreateInstance(context.Background(), s.Fixtures.Pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, s.Fixtures.Pool.ID, s.Fixtures.CreateInstanceParams)
// assertions
s.Require().Nil(err)
- storeInstance, err := s.Store.GetInstanceByName(context.Background(), s.Fixtures.CreateInstanceParams.Name)
+ storeInstance, err := s.Store.GetInstance(s.adminCtx, s.Fixtures.CreateInstanceParams.Name)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get instance: %v", err))
}
@@ -194,17 +208,17 @@ func (s *InstancesTestSuite) TestCreateInstance() {
}
func (s *InstancesTestSuite) TestCreateInstanceInvalidPoolID() {
- _, err := s.Store.CreateInstance(context.Background(), "dummy-pool-id", params.CreateInstanceParams{})
+ _, err := s.Store.CreateInstance(s.adminCtx, "dummy-pool-id", params.CreateInstanceParams{})
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestCreateInstanceDBCreateErr() {
pool := s.Fixtures.Pool
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -215,36 +229,17 @@ func (s *InstancesTestSuite) TestCreateInstanceDBCreateErr() {
WillReturnError(fmt.Errorf("mocked insert instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ _, err := s.StoreSQLMocked.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating instance: mocked insert instance error", err.Error())
-}
-
-func (s *InstancesTestSuite) TestGetPoolInstanceByName() {
- storeInstance := s.Fixtures.Instances[0] // this is already created in `SetupTest()`
-
- instance, err := s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
-
- s.Require().Nil(err)
- s.Require().Equal(storeInstance.Name, instance.Name)
- s.Require().Equal(storeInstance.PoolID, instance.PoolID)
- s.Require().Equal(storeInstance.OSArch, instance.OSArch)
- s.Require().Equal(storeInstance.OSType, instance.OSType)
- s.Require().Equal(storeInstance.CallbackURL, instance.CallbackURL)
-}
-
-func (s *InstancesTestSuite) TestGetPoolInstanceByNameNotFound() {
- _, err := s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, "not-existent-instance-name")
-
- s.Require().Equal("fetching instance: fetching pool instance by name: not found", err.Error())
+ s.Require().Equal("error creating instance: mocked insert instance error", err.Error())
}
func (s *InstancesTestSuite) TestGetInstanceByName() {
storeInstance := s.Fixtures.Instances[1]
- instance, err := s.Store.GetInstanceByName(context.Background(), storeInstance.Name)
+ instance, err := s.Store.GetInstance(s.adminCtx, storeInstance.Name)
s.Require().Nil(err)
s.Require().Equal(storeInstance.Name, instance.Name)
@@ -255,26 +250,43 @@ func (s *InstancesTestSuite) TestGetInstanceByName() {
}
func (s *InstancesTestSuite) TestGetInstanceByNameFetchInstanceFailed() {
- _, err := s.Store.GetInstanceByName(context.Background(), "not-existent-instance-name")
+ _, err := s.Store.GetInstance(s.adminCtx, "not-existent-instance-name")
- s.Require().Equal("fetching instance: fetching instance by name: not found", err.Error())
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
}
func (s *InstancesTestSuite) TestDeleteInstance() {
storeInstance := s.Fixtures.Instances[0]
- err := s.Store.DeleteInstance(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
+ err := s.Store.DeleteInstance(s.adminCtx, s.Fixtures.Pool.ID, storeInstance.Name)
s.Require().Nil(err)
- _, err = s.Store.GetPoolInstanceByName(context.Background(), s.Fixtures.Pool.ID, storeInstance.Name)
- s.Require().Equal("fetching instance: fetching pool instance by name: not found", err.Error())
+ _, err = s.Store.GetInstance(s.adminCtx, storeInstance.Name)
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
+
+ err = s.Store.DeleteInstance(s.adminCtx, s.Fixtures.Pool.ID, storeInstance.Name)
+ s.Require().Nil(err)
+}
+
+func (s *InstancesTestSuite) TestDeleteInstanceByName() {
+ storeInstance := s.Fixtures.Instances[0]
+
+ err := s.Store.DeleteInstanceByName(s.adminCtx, storeInstance.Name)
+
+ s.Require().Nil(err)
+
+ _, err = s.Store.GetInstance(s.adminCtx, storeInstance.Name)
+ s.Require().Equal("error fetching instance: error fetching instance by name: not found", err.Error())
+
+ err = s.Store.DeleteInstanceByName(s.adminCtx, storeInstance.Name)
+ s.Require().Nil(err)
}
func (s *InstancesTestSuite) TestDeleteInstanceInvalidPoolID() {
- err := s.Store.DeleteInstance(context.Background(), "dummy-pool-id", "dummy-instance-name")
+ err := s.Store.DeleteInstance(s.adminCtx, "dummy-pool-id", "dummy-instance-name")
- s.Require().Equal("deleting instance: fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error deleting instance: error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
@@ -282,17 +294,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.Name, pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -304,7 +320,7 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
WillReturnError(gorm.ErrRecordNotFound)
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteInstance(context.Background(), pool.ID, instance.Name)
+ err := s.StoreSQLMocked.DeleteInstance(s.adminCtx, pool.ID, instance.Name)
s.assertSQLMockExpectations()
s.Require().Nil(err)
@@ -315,17 +331,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBDeleteErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.Name, pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("12.10.12.13", "public", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -337,21 +357,21 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteInstance(context.Background(), pool.ID, instance.Name)
+ err := s.StoreSQLMocked.DeleteInstance(s.adminCtx, pool.ID, instance.Name)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting instance: mocked delete instance error", err.Error())
+ s.Require().Equal("error deleting instance: mocked delete instance error", err.Error())
}
func (s *InstancesTestSuite) TestAddInstanceEvent() {
storeInstance := s.Fixtures.Instances[0]
statusMsg := "test-status-message"
- err := s.Store.AddInstanceEvent(context.Background(), storeInstance.ID, params.StatusEvent, params.EventInfo, statusMsg)
+ err := s.Store.AddInstanceEvent(s.adminCtx, storeInstance.Name, params.StatusEvent, params.EventInfo, statusMsg)
s.Require().Nil(err)
- instance, err := s.Store.GetInstanceByName(context.Background(), storeInstance.Name)
+ instance, err := s.Store.GetInstance(s.adminCtx, storeInstance.Name)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get db instance: %s", err))
}
@@ -359,24 +379,22 @@ func (s *InstancesTestSuite) TestAddInstanceEvent() {
s.Require().Equal(statusMsg, instance.StatusMessages[0].Message)
}
-func (s *InstancesTestSuite) TestAddInstanceEventInvalidPoolID() {
- err := s.Store.AddInstanceEvent(context.Background(), "dummy-id", params.StatusEvent, params.EventInfo, "dummy-message")
-
- s.Require().Equal("updating instance: parsing id: invalid request", err.Error())
-}
-
func (s *InstancesTestSuite) TestAddInstanceEventDBUpdateErr() {
instance := s.Fixtures.Instances[0]
statusMsg := "test-status-message"
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -391,15 +409,15 @@ func (s *InstancesTestSuite) TestAddInstanceEventDBUpdateErr() {
WillReturnError(fmt.Errorf("mocked add status message error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.AddInstanceEvent(context.Background(), instance.ID, params.StatusEvent, params.EventInfo, statusMsg)
+ err := s.StoreSQLMocked.AddInstanceEvent(s.adminCtx, instance.Name, params.StatusEvent, params.EventInfo, statusMsg)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding status message: mocked add status message error", err.Error())
+ s.Require().Equal("error adding status message: mocked add status message error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestUpdateInstance() {
- instance, err := s.Store.UpdateInstance(context.Background(), s.Fixtures.Instances[0].ID, s.Fixtures.UpdateInstanceParams)
+ instance, err := s.Store.UpdateInstance(s.adminCtx, s.Fixtures.Instances[0].Name, s.Fixtures.UpdateInstanceParams)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.UpdateInstanceParams.ProviderID, instance.ProviderID)
@@ -411,23 +429,21 @@ func (s *InstancesTestSuite) TestUpdateInstance() {
s.Require().Equal(s.Fixtures.UpdateInstanceParams.CreateAttempt, instance.CreateAttempt)
}
-func (s *InstancesTestSuite) TestUpdateInstanceInvalidPoolID() {
- _, err := s.Store.UpdateInstance(context.Background(), "dummy-id", params.UpdateInstanceParams{})
-
- s.Require().Equal("updating instance: parsing id: invalid request", err.Error())
-}
-
func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateInstanceErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -438,24 +454,28 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateInstanceErr() {
WillReturnError(fmt.Errorf("mocked update instance error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateInstance(context.Background(), instance.ID, s.Fixtures.UpdateInstanceParams)
+ _, err := s.StoreSQLMocked.UpdateInstance(s.adminCtx, instance.Name, s.Fixtures.UpdateInstanceParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("updating instance: mocked update instance error", err.Error())
+ s.Require().Equal("error updating instance: mocked update instance error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateAddressErr() {
instance := s.Fixtures.Instances[0]
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE id = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
- WithArgs(instance.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+ WithArgs(instance.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
WillReturnRows(sqlmock.NewRows([]string{"address", "type", "instance_id"}).AddRow("10.10.1.10", "private", instance.ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `workflow_jobs` WHERE `workflow_jobs`.`instance_id` = ? AND `workflow_jobs`.`deleted_at` IS NULL")).
+ WithArgs(instance.ID).
+ WillReturnRows(sqlmock.NewRows([]string{}))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instance_status_updates` WHERE `instance_status_updates`.`instance_id` = ? AND `instance_status_updates`.`deleted_at` IS NULL")).
WithArgs(instance.ID).
@@ -480,28 +500,28 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateAddressErr() {
WillReturnError(fmt.Errorf("update addresses mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateInstance(context.Background(), instance.ID, s.Fixtures.UpdateInstanceParams)
+ _, err := s.StoreSQLMocked.UpdateInstance(s.adminCtx, instance.Name, s.Fixtures.UpdateInstanceParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("updating addresses: update addresses mock error", err.Error())
+ s.Require().Equal("error updating addresses: update addresses mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *InstancesTestSuite) TestListPoolInstances() {
- instances, err := s.Store.ListPoolInstances(context.Background(), s.Fixtures.Pool.ID)
+ instances, err := s.Store.ListPoolInstances(s.adminCtx, s.Fixtures.Pool.ID)
s.Require().Nil(err)
s.equalInstancesByName(s.Fixtures.Instances, instances)
}
func (s *InstancesTestSuite) TestListPoolInstancesInvalidPoolID() {
- _, err := s.Store.ListPoolInstances(context.Background(), "dummy-pool-id")
+ _, err := s.Store.ListPoolInstances(s.adminCtx, "dummy-pool-id")
- s.Require().Equal("parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestListAllInstances() {
- instances, err := s.Store.ListAllInstances(context.Background())
+ instances, err := s.Store.ListAllInstances(s.adminCtx)
s.Require().Nil(err)
s.equalInstancesByName(s.Fixtures.Instances, instances)
@@ -512,43 +532,43 @@ func (s *InstancesTestSuite) TestListAllInstancesDBFetchErr() {
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE `instances`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetch instances mock error"))
- _, err := s.StoreSQLMocked.ListAllInstances(context.Background())
+ _, err := s.StoreSQLMocked.ListAllInstances(s.adminCtx)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching instances: fetch instances mock error", err.Error())
+ s.Require().Equal("error fetching instances: fetch instances mock error", err.Error())
}
func (s *InstancesTestSuite) TestPoolInstanceCount() {
- instancesCount, err := s.Store.PoolInstanceCount(context.Background(), s.Fixtures.Pool.ID)
+ instancesCount, err := s.Store.PoolInstanceCount(s.adminCtx, s.Fixtures.Pool.ID)
s.Require().Nil(err)
s.Require().Equal(int64(len(s.Fixtures.Instances)), instancesCount)
}
func (s *InstancesTestSuite) TestPoolInstanceCountInvalidPoolID() {
- _, err := s.Store.PoolInstanceCount(context.Background(), "dummy-pool-id")
+ _, err := s.Store.PoolInstanceCount(s.adminCtx, "dummy-pool-id")
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func (s *InstancesTestSuite) TestPoolInstanceCountDBCountErr() {
pool := s.Fixtures.Pool
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(pool.ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT count(*) FROM `instances` WHERE pool_id = ? AND `instances`.`deleted_at` IS NULL")).
WithArgs(pool.ID).
WillReturnError(fmt.Errorf("count mock error"))
- _, err := s.StoreSQLMocked.PoolInstanceCount(context.Background(), pool.ID)
+ _, err := s.StoreSQLMocked.PoolInstanceCount(s.adminCtx, pool.ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching instance count: count mock error", err.Error())
+ s.Require().Equal("error fetching instance count: count mock error", err.Error())
}
func TestInstTestSuite(t *testing.T) {
diff --git a/database/sql/jobs.go b/database/sql/jobs.go
new file mode 100644
index 00000000..ffa3a7b5
--- /dev/null
+++ b/database/sql/jobs.go
@@ -0,0 +1,439 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ "github.com/google/uuid"
+ "gorm.io/gorm"
+ "gorm.io/gorm/clause"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+var _ common.JobsStore = &sqlDatabase{}
+
+func sqlWorkflowJobToParamsJob(job WorkflowJob) (params.Job, error) {
+ labels := []string{}
+ if job.Labels != nil {
+ if err := json.Unmarshal(job.Labels, &labels); err != nil {
+ return params.Job{}, fmt.Errorf("error unmarshaling labels: %w", err)
+ }
+ }
+
+ jobParam := params.Job{
+ ID: job.ID,
+ WorkflowJobID: job.WorkflowJobID,
+ ScaleSetJobID: job.ScaleSetJobID,
+ RunID: job.RunID,
+ Action: job.Action,
+ Status: job.Status,
+ Name: job.Name,
+ Conclusion: job.Conclusion,
+ StartedAt: job.StartedAt,
+ CompletedAt: job.CompletedAt,
+ GithubRunnerID: job.GithubRunnerID,
+ RunnerGroupID: job.RunnerGroupID,
+ RunnerGroupName: job.RunnerGroupName,
+ RepositoryName: job.RepositoryName,
+ RepositoryOwner: job.RepositoryOwner,
+ RepoID: job.RepoID,
+ OrgID: job.OrgID,
+ EnterpriseID: job.EnterpriseID,
+ Labels: labels,
+ CreatedAt: job.CreatedAt,
+ UpdatedAt: job.UpdatedAt,
+ LockedBy: job.LockedBy,
+ }
+
+ if job.InstanceID != nil {
+ jobParam.RunnerName = job.Instance.Name
+ }
+ return jobParam, nil
+}
+
+func (s *sqlDatabase) paramsJobToWorkflowJob(ctx context.Context, job params.Job) (WorkflowJob, error) {
+ asJSON, err := json.Marshal(job.Labels)
+ if err != nil {
+ return WorkflowJob{}, fmt.Errorf("error marshaling labels: %w", err)
+ }
+
+ workflofJob := WorkflowJob{
+ ScaleSetJobID: job.ScaleSetJobID,
+ WorkflowJobID: job.WorkflowJobID,
+ RunID: job.RunID,
+ Action: job.Action,
+ Status: job.Status,
+ Name: job.Name,
+ Conclusion: job.Conclusion,
+ StartedAt: job.StartedAt,
+ CompletedAt: job.CompletedAt,
+ GithubRunnerID: job.GithubRunnerID,
+ RunnerGroupID: job.RunnerGroupID,
+ RunnerGroupName: job.RunnerGroupName,
+ RepositoryName: job.RepositoryName,
+ RepositoryOwner: job.RepositoryOwner,
+ RepoID: job.RepoID,
+ OrgID: job.OrgID,
+ EnterpriseID: job.EnterpriseID,
+ Labels: asJSON,
+ LockedBy: job.LockedBy,
+ }
+
+ if job.RunnerName != "" {
+ instance, err := s.getInstance(s.ctx, job.RunnerName)
+ if err != nil {
+			// This is usually normal, as not all jobs run on our runners.
+ slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
+ } else {
+ workflofJob.InstanceID = &instance.ID
+ }
+ }
+
+ return workflofJob, nil
+}
+
+func (s *sqlDatabase) DeleteJob(_ context.Context, jobID int64) (err error) {
+ var workflowJob WorkflowJob
+ q := s.conn.Where("workflow_job_id = ?", jobID).Preload("Instance").First(&workflowJob)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching job: %w", q.Error)
+ }
+ removedJob, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ if notifyErr := s.sendNotify(common.JobEntityType, common.DeleteOperation, removedJob); notifyErr != nil {
+ slog.With(slog.Any("error", notifyErr)).Error("failed to send notify")
+ }
+ }
+ }()
+ q = s.conn.Delete(&workflowJob)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error deleting job: %w", q.Error)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) LockJob(_ context.Context, jobID int64, entityID string) error {
+ entityUUID, err := uuid.Parse(entityID)
+ if err != nil {
+ return fmt.Errorf("error parsing entity id: %w", err)
+ }
+ var workflowJob WorkflowJob
+ q := s.conn.Preload("Instance").Where("workflow_job_id = ?", jobID).First(&workflowJob)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return runnerErrors.ErrNotFound
+ }
+ return fmt.Errorf("error fetching job: %w", q.Error)
+ }
+
+ if workflowJob.LockedBy.String() == entityID {
+ // Already locked by us.
+ return nil
+ }
+
+ if workflowJob.LockedBy != uuid.Nil {
+ return runnerErrors.NewConflictError("job is locked by another entity %s", workflowJob.LockedBy.String())
+ }
+
+ workflowJob.LockedBy = entityUUID
+
+ if err := s.conn.Save(&workflowJob).Error; err != nil {
+ return fmt.Errorf("error saving job: %w", err)
+ }
+
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
+
+ return nil
+}
+
+func (s *sqlDatabase) BreakLockJobIsQueued(_ context.Context, jobID int64) (err error) {
+ var workflowJob WorkflowJob
+ q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Preload("Instance").Where("workflow_job_id = ? and status = ?", jobID, params.JobStatusQueued).First(&workflowJob)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return fmt.Errorf("error fetching job: %w", q.Error)
+ }
+
+ if workflowJob.LockedBy == uuid.Nil {
+ // Job is already unlocked.
+ return nil
+ }
+
+ workflowJob.LockedBy = uuid.Nil
+ if err := s.conn.Save(&workflowJob).Error; err != nil {
+ return fmt.Errorf("error saving job: %w", err)
+ }
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
+ return nil
+}
+
+func (s *sqlDatabase) UnlockJob(_ context.Context, jobID int64, entityID string) error {
+ var workflowJob WorkflowJob
+ q := s.conn.Clauses(clause.Locking{Strength: "UPDATE"}).Where("workflow_job_id = ?", jobID).First(&workflowJob)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return runnerErrors.ErrNotFound
+ }
+ return fmt.Errorf("error fetching job: %w", q.Error)
+ }
+
+ if workflowJob.LockedBy == uuid.Nil {
+ // Job is already unlocked.
+ return nil
+ }
+
+ if workflowJob.LockedBy != uuid.Nil && workflowJob.LockedBy.String() != entityID {
+ return runnerErrors.NewConflictError("job is locked by another entity %s", workflowJob.LockedBy.String())
+ }
+
+ workflowJob.LockedBy = uuid.Nil
+ if err := s.conn.Save(&workflowJob).Error; err != nil {
+ return fmt.Errorf("error saving job: %w", err)
+ }
+
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, common.UpdateOperation, asParams)
+ return nil
+}
+
+func (s *sqlDatabase) CreateOrUpdateJob(ctx context.Context, job params.Job) (params.Job, error) {
+ var workflowJob WorkflowJob
+ var err error
+
+ searchField := "workflow_job_id = ?"
+ var searchVal any = job.WorkflowJobID
+ if job.ScaleSetJobID != "" {
+ searchField = "scale_set_job_id = ?"
+ searchVal = job.ScaleSetJobID
+ }
+ q := s.conn.Preload("Instance").Where(searchField, searchVal).First(&workflowJob)
+
+ if q.Error != nil {
+ if !errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return params.Job{}, fmt.Errorf("error fetching job: %w", q.Error)
+ }
+ }
+ var operation common.OperationType
+ if workflowJob.ID != 0 {
+ // Update workflowJob with values from job.
+ operation = common.UpdateOperation
+
+ workflowJob.Status = job.Status
+ workflowJob.Action = job.Action
+ workflowJob.Conclusion = job.Conclusion
+ workflowJob.StartedAt = job.StartedAt
+ workflowJob.CompletedAt = job.CompletedAt
+ workflowJob.GithubRunnerID = job.GithubRunnerID
+ workflowJob.RunnerGroupID = job.RunnerGroupID
+ workflowJob.RunnerGroupName = job.RunnerGroupName
+ if job.RunID != 0 && workflowJob.RunID == 0 {
+ workflowJob.RunID = job.RunID
+ }
+
+ if job.LockedBy != uuid.Nil {
+ workflowJob.LockedBy = job.LockedBy
+ }
+
+ if job.RunnerName != "" {
+ instance, err := s.getInstance(ctx, job.RunnerName)
+ if err == nil {
+ workflowJob.InstanceID = &instance.ID
+ } else {
+				// This is usually normal, as not all jobs run on our runners.
+ slog.DebugContext(ctx, "failed to get instance by name", "instance_name", job.RunnerName)
+ }
+ }
+
+ if job.RepoID != nil {
+ workflowJob.RepoID = job.RepoID
+ }
+
+ if job.OrgID != nil {
+ workflowJob.OrgID = job.OrgID
+ }
+
+ if job.EnterpriseID != nil {
+ workflowJob.EnterpriseID = job.EnterpriseID
+ }
+ if err := s.conn.Save(&workflowJob).Error; err != nil {
+ return params.Job{}, fmt.Errorf("error saving job: %w", err)
+ }
+ } else {
+ operation = common.CreateOperation
+
+ workflowJob, err = s.paramsJobToWorkflowJob(ctx, job)
+ if err != nil {
+ return params.Job{}, fmt.Errorf("error converting job: %w", err)
+ }
+ if err := s.conn.Create(&workflowJob).Error; err != nil {
+ return params.Job{}, fmt.Errorf("error creating job: %w", err)
+ }
+ }
+
+ asParams, err := sqlWorkflowJobToParamsJob(workflowJob)
+ if err != nil {
+ return params.Job{}, fmt.Errorf("error converting job: %w", err)
+ }
+ s.sendNotify(common.JobEntityType, operation, asParams)
+
+ return asParams, nil
+}
+
+// ListJobsByStatus lists all jobs for a given status.
+func (s *sqlDatabase) ListJobsByStatus(_ context.Context, status params.JobStatus) ([]params.Job, error) {
+ var jobs []WorkflowJob
+ query := s.conn.Model(&WorkflowJob{}).Preload("Instance").Where("status = ?", status)
+
+ if err := query.Find(&jobs); err.Error != nil {
+ return nil, err.Error
+ }
+
+ ret := make([]params.Job, len(jobs))
+ for idx, job := range jobs {
+ jobParam, err := sqlWorkflowJobToParamsJob(job)
+ if err != nil {
+ return nil, fmt.Errorf("error converting job: %w", err)
+ }
+ ret[idx] = jobParam
+ }
+ return ret, nil
+}
+
+// ListEntityJobsByStatus lists all jobs for a given entity type and id.
+func (s *sqlDatabase) ListEntityJobsByStatus(_ context.Context, entityType params.ForgeEntityType, entityID string, status params.JobStatus) ([]params.Job, error) {
+ u, err := uuid.Parse(entityID)
+ if err != nil {
+ return nil, err
+ }
+
+ var jobs []WorkflowJob
+ query := s.conn.
+ Model(&WorkflowJob{}).
+ Preload("Instance").
+ Where("status = ?", status).
+ Where("workflow_job_id > 0")
+
+ switch entityType {
+ case params.ForgeEntityTypeOrganization:
+ query = query.Where("org_id = ?", u)
+ case params.ForgeEntityTypeRepository:
+ query = query.Where("repo_id = ?", u)
+ case params.ForgeEntityTypeEnterprise:
+ query = query.Where("enterprise_id = ?", u)
+ }
+
+ if err := query.Find(&jobs); err.Error != nil {
+ if errors.Is(err.Error, gorm.ErrRecordNotFound) {
+ return []params.Job{}, nil
+ }
+ return nil, err.Error
+ }
+
+ ret := make([]params.Job, len(jobs))
+ for idx, job := range jobs {
+ jobParam, err := sqlWorkflowJobToParamsJob(job)
+ if err != nil {
+ return nil, fmt.Errorf("error converting job: %w", err)
+ }
+ ret[idx] = jobParam
+ }
+ return ret, nil
+}
+
+func (s *sqlDatabase) ListAllJobs(_ context.Context) ([]params.Job, error) {
+ var jobs []WorkflowJob
+ query := s.conn.Model(&WorkflowJob{})
+
+ if err := query.Preload("Instance").Find(&jobs); err.Error != nil {
+ if errors.Is(err.Error, gorm.ErrRecordNotFound) {
+ return []params.Job{}, nil
+ }
+ return nil, err.Error
+ }
+
+ ret := make([]params.Job, len(jobs))
+ for idx, job := range jobs {
+ jobParam, err := sqlWorkflowJobToParamsJob(job)
+ if err != nil {
+ return nil, fmt.Errorf("error converting job: %w", err)
+ }
+ ret[idx] = jobParam
+ }
+ return ret, nil
+}
+
+// GetJobByID gets a job by id.
+func (s *sqlDatabase) GetJobByID(_ context.Context, jobID int64) (params.Job, error) {
+ var job WorkflowJob
+ query := s.conn.Model(&WorkflowJob{}).Preload("Instance").Where("workflow_job_id = ?", jobID)
+
+ if err := query.First(&job); err.Error != nil {
+ if errors.Is(err.Error, gorm.ErrRecordNotFound) {
+ return params.Job{}, runnerErrors.ErrNotFound
+ }
+ return params.Job{}, err.Error
+ }
+
+ return sqlWorkflowJobToParamsJob(job)
+}
+
+// DeleteCompletedJobs deletes all completed jobs.
+func (s *sqlDatabase) DeleteCompletedJobs(_ context.Context) error {
+ query := s.conn.Model(&WorkflowJob{}).Where("status = ?", params.JobStatusCompleted)
+
+ if err := query.Unscoped().Delete(&WorkflowJob{}); err.Error != nil {
+ if errors.Is(err.Error, gorm.ErrRecordNotFound) {
+ return nil
+ }
+ return err.Error
+ }
+
+ return nil
+}
diff --git a/database/sql/models.go b/database/sql/models.go
index a46f0e54..d3cb044a 100644
--- a/database/sql/models.go
+++ b/database/sql/models.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Cloudbase Solutions SRL
+// Copyright 2025 Cloudbase Solutions SRL
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
@@ -15,15 +15,15 @@
package sql
import (
+ "fmt"
"time"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/providers/common"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
"gorm.io/datatypes"
"gorm.io/gorm"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
)
type Base struct {
@@ -33,24 +33,40 @@ type Base struct {
DeletedAt gorm.DeletedAt `gorm:"index"`
}
-func (b *Base) BeforeCreate(tx *gorm.DB) error {
- emptyId := uuid.UUID{}
- if b.ID != emptyId {
+func (b *Base) BeforeCreate(_ *gorm.DB) error {
+ emptyID := uuid.UUID{}
+ if b.ID != emptyID {
return nil
}
- newID, err := uuid.NewV4()
+ newID, err := uuid.NewRandom()
if err != nil {
- return errors.Wrap(err, "generating id")
+ return fmt.Errorf("error generating id: %w", err)
}
b.ID = newID
return nil
}
+type ControllerInfo struct {
+ Base
+
+ ControllerID uuid.UUID
+
+ CallbackURL string
+ MetadataURL string
+ WebhookBaseURL string
+ // MinimumJobAgeBackoff is the minimum time that a job must be in the queue
+ // before GARM will attempt to allocate a runner to service it. This backoff
+ // is useful if you have idle runners in various pools that could potentially
+ // pick up the job. GARM would allow this amount of time for runners to react
+ // before spinning up a new one and potentially having to scale down later.
+ MinimumJobAgeBackoff uint
+}
+
type Tag struct {
Base
Name string `gorm:"type:varchar(64);uniqueIndex"`
- Pools []*Pool `gorm:"many2many:pool_tags;"`
+ Pools []*Pool `gorm:"many2many:pool_tags;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
type Pool struct {
@@ -63,9 +79,9 @@ type Pool struct {
RunnerBootstrapTimeout uint
Image string `gorm:"index:idx_pool_type"`
Flavor string `gorm:"index:idx_pool_type"`
- OSType params.OSType
- OSArch params.OSArch
- Tags []*Tag `gorm:"many2many:pool_tags;"`
+ OSType commonParams.OSType
+ OSArch commonParams.OSArch
+ Tags []*Tag `gorm:"many2many:pool_tags;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
Enabled bool
// ExtraSpecs is an opaque json that gets sent to the provider
// as part of the bootstrap params for instances. It can contain
@@ -73,44 +89,165 @@ type Pool struct {
ExtraSpecs datatypes.JSON
GitHubRunnerGroup string
- RepoID uuid.UUID `gorm:"index"`
- Repository Repository `gorm:"foreignKey:RepoID"`
+ RepoID *uuid.UUID `gorm:"index"`
+ Repository Repository `gorm:"foreignKey:RepoID;"`
- OrgID uuid.UUID `gorm:"index"`
+ OrgID *uuid.UUID `gorm:"index"`
Organization Organization `gorm:"foreignKey:OrgID"`
- EnterpriseID uuid.UUID `gorm:"index"`
+ EnterpriseID *uuid.UUID `gorm:"index"`
Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
Instances []Instance `gorm:"foreignKey:PoolID"`
+ Priority uint `gorm:"index:idx_pool_priority"`
+}
+
+// ScaleSet represents a github scale set. Scale sets are almost identical to pools with a few
+// notable exceptions:
+// - Labels are no longer relevant
+// - Workflows will use the scaleset name to target runners.
+// - A scale set is a standalone unit. If a workflow targets a scale set, no other runner will pick up that job.
+type ScaleSet struct {
+ gorm.Model
+
+ // ScaleSetID is the github ID of the scale set. This field may not be set if
+	// the scale set was created in GARM but has not yet been created in GitHub.
+ // The scale set ID is also not globally unique. It is only unique within the context
+ // of an entity.
+ ScaleSetID int `gorm:"index:idx_scale_set"`
+ Name string `gorm:"unique_index:idx_name"`
+ GitHubRunnerGroup string `gorm:"unique_index:idx_name"`
+ DisableUpdate bool
+
+ // State stores the provisioning state of the scale set in GitHub
+ State params.ScaleSetState
+ // ExtendedState stores a more detailed message regarding the State.
+ // If an error occurs, the reason for the error will be stored here.
+ ExtendedState string
+
+ ProviderName string
+ RunnerPrefix string
+ MaxRunners uint
+ MinIdleRunners uint
+ RunnerBootstrapTimeout uint
+ Image string
+ Flavor string
+ OSType commonParams.OSType
+ OSArch commonParams.OSArch
+ Enabled bool
+ LastMessageID int64
+ DesiredRunnerCount int
+ // ExtraSpecs is an opaque json that gets sent to the provider
+ // as part of the bootstrap params for instances. It can contain
+ // any kind of data needed by providers.
+ ExtraSpecs datatypes.JSON
+
+ RepoID *uuid.UUID `gorm:"index"`
+ Repository Repository `gorm:"foreignKey:RepoID;"`
+
+ OrgID *uuid.UUID `gorm:"index"`
+ Organization Organization `gorm:"foreignKey:OrgID"`
+
+ EnterpriseID *uuid.UUID `gorm:"index"`
+ Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
+
+ Instances []Instance `gorm:"foreignKey:ScaleSetFkID"`
+}
+
+type RepositoryEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ RepoID uuid.UUID `gorm:"index:idx_repo_event"`
+ Repo Repository `gorm:"foreignKey:RepoID"`
}
type Repository struct {
Base
- CredentialsName string
- Owner string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
- Name string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:RepoID"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ GiteaCredentialsID *uint `gorm:"index"`
+ GiteaCredentials GiteaCredentials `gorm:"foreignKey:GiteaCredentialsID;constraint:OnDelete:SET NULL"`
+
+ Owner string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ Name string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:RepoID"`
+ ScaleSets []ScaleSet `gorm:"foreignKey:RepoID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:RepoID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_owner_nocase,unique,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []RepositoryEvent `gorm:"foreignKey:RepoID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
+type OrganizationEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ OrgID uuid.UUID `gorm:"index:idx_org_event"`
+ Org Organization `gorm:"foreignKey:OrgID"`
+}
type Organization struct {
Base
- CredentialsName string
- Name string `gorm:"index:idx_org_name_nocase,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:OrgID"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ GiteaCredentialsID *uint `gorm:"index"`
+ GiteaCredentials GiteaCredentials `gorm:"foreignKey:GiteaCredentialsID;constraint:OnDelete:SET NULL"`
+
+ Name string `gorm:"index:idx_org_name_nocase,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:OrgID"`
+ ScaleSet []ScaleSet `gorm:"foreignKey:OrgID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:OrgID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_org_name_nocase,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []OrganizationEvent `gorm:"foreignKey:OrgID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
+}
+
+type EnterpriseEvent struct {
+ gorm.Model
+
+ EventType params.EventType
+ EventLevel params.EventLevel
+ Message string `gorm:"type:text"`
+
+ EnterpriseID uuid.UUID `gorm:"index:idx_enterprise_event"`
+ Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
}
type Enterprise struct {
Base
- CredentialsName string
- Name string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
- WebhookSecret []byte
- Pools []Pool `gorm:"foreignKey:EnterpriseID"`
+ CredentialsID *uint `gorm:"index"`
+ Credentials GithubCredentials `gorm:"foreignKey:CredentialsID;constraint:OnDelete:SET NULL"`
+
+ Name string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
+ WebhookSecret []byte
+ Pools []Pool `gorm:"foreignKey:EnterpriseID"`
+ ScaleSet []ScaleSet `gorm:"foreignKey:EnterpriseID"`
+ Jobs []WorkflowJob `gorm:"foreignKey:EnterpriseID;constraint:OnDelete:SET NULL"`
+ PoolBalancerType params.PoolBalancerType `gorm:"type:varchar(64)"`
+
+ EndpointName *string `gorm:"index:idx_ent_name_nocase,collate:nocase"`
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName;constraint:OnDelete:SET NULL"`
+
+ Events []EnterpriseEvent `gorm:"foreignKey:EnterpriseID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
type Address struct {
@@ -130,8 +267,8 @@ type InstanceStatusUpdate struct {
EventLevel params.EventLevel
Message string `gorm:"type:text"`
- InstanceID uuid.UUID
- Instance Instance `gorm:"foreignKey:InstanceID"`
+ InstanceID uuid.UUID `gorm:"index:idx_instance_status_updates_instance_id"`
+ Instance Instance `gorm:"foreignKey:InstanceID"`
}
type Instance struct {
@@ -140,39 +277,158 @@ type Instance struct {
ProviderID *string `gorm:"uniqueIndex"`
Name string `gorm:"uniqueIndex"`
AgentID int64
- OSType params.OSType
- OSArch params.OSArch
+ OSType commonParams.OSType
+ OSArch commonParams.OSArch
OSName string
OSVersion string
- Addresses []Address `gorm:"foreignKey:InstanceID"`
- Status common.InstanceStatus
- RunnerStatus common.RunnerStatus
+ Addresses []Address `gorm:"foreignKey:InstanceID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
+ Status commonParams.InstanceStatus
+ RunnerStatus params.RunnerStatus
CallbackURL string
MetadataURL string
ProviderFault []byte `gorm:"type:longblob"`
CreateAttempt int
TokenFetched bool
+ JitConfiguration []byte `gorm:"type:longblob"`
GitHubRunnerGroup string
+ AditionalLabels datatypes.JSON
- PoolID uuid.UUID
+ PoolID *uuid.UUID
Pool Pool `gorm:"foreignKey:PoolID"`
- StatusMessages []InstanceStatusUpdate `gorm:"foreignKey:InstanceID"`
+ ScaleSetFkID *uint
+ ScaleSet ScaleSet `gorm:"foreignKey:ScaleSetFkID"`
+
+ StatusMessages []InstanceStatusUpdate `gorm:"foreignKey:InstanceID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
+
+ Job *WorkflowJob `gorm:"foreignKey:InstanceID;constraint:OnDelete:CASCADE,OnUpdate:CASCADE;"`
}
type User struct {
Base
- Username string `gorm:"uniqueIndex;varchar(64)"`
- FullName string `gorm:"type:varchar(254)"`
- Email string `gorm:"type:varchar(254);unique;index:idx_email"`
- Password string `gorm:"type:varchar(60)"`
- IsAdmin bool
- Enabled bool
+ Username string `gorm:"uniqueIndex;varchar(64)"`
+ FullName string `gorm:"type:varchar(254)"`
+ Email string `gorm:"type:varchar(254);unique;index:idx_email"`
+ Password string `gorm:"type:varchar(60)"`
+ Generation uint
+ IsAdmin bool
+ Enabled bool
}
-type ControllerInfo struct {
- Base
+type WorkflowJob struct {
+ // ID is the ID of the job.
+ ID int64 `gorm:"index"`
- ControllerID uuid.UUID
+ // WorkflowJobID is the ID of the workflow job.
+ WorkflowJobID int64 `gorm:"index:workflow_job_id_idx"`
+ // ScaleSetJobID is the job ID for a scaleset job.
+ ScaleSetJobID string `gorm:"index:scaleset_job_id_idx"`
+
+ // RunID is the ID of the workflow run. A run may have multiple jobs.
+ RunID int64
+ // Action is the specific activity that triggered the event.
+ Action string `gorm:"type:varchar(254);index"`
+ // Conclusion is the outcome of the job.
+ // Possible values: "success", "failure", "neutral", "cancelled", "skipped",
+ // "timed_out", "action_required"
+ Conclusion string
+ // Status is the phase of the lifecycle that the job is currently in.
+ // "queued", "in_progress" and "completed".
+ Status string
+	// Name is the name of the job that was triggered.
+ Name string
+
+ StartedAt time.Time
+ CompletedAt time.Time
+
+ GithubRunnerID int64
+
+ InstanceID *uuid.UUID `gorm:"index:idx_instance_job"`
+ Instance Instance `gorm:"foreignKey:InstanceID"`
+
+ RunnerGroupID int64
+ RunnerGroupName string
+
+ // repository in which the job was triggered.
+ RepositoryName string
+ RepositoryOwner string
+
+ Labels datatypes.JSON
+
+ // The entity that received the hook.
+ //
+ // Webhooks may be configured on the repo, the org and/or the enterprise.
+ // If we only configure a repo to use garm, we'll only ever receive a
+ // webhook from the repo. But if we configure the parent org of the repo and
+ // the parent enterprise of the org to use garm, a webhook will be sent for each
+ // entity type, in response to one workflow event. Thus, we will get 3 webhooks
+ // with the same run_id and job id. Record all involved entities in the same job
+ // if we have them configured in garm.
+ RepoID *uuid.UUID `gorm:"index"`
+ Repository Repository `gorm:"foreignKey:RepoID"`
+
+ OrgID *uuid.UUID `gorm:"index"`
+ Organization Organization `gorm:"foreignKey:OrgID"`
+
+ EnterpriseID *uuid.UUID `gorm:"index"`
+ Enterprise Enterprise `gorm:"foreignKey:EnterpriseID"`
+
+ LockedBy uuid.UUID
+
+ CreatedAt time.Time
+ UpdatedAt time.Time
+ DeletedAt gorm.DeletedAt `gorm:"index"`
+}
+
+type GithubEndpoint struct {
+ Name string `gorm:"type:varchar(64) collate nocase;primary_key;"`
+ CreatedAt time.Time
+ UpdatedAt time.Time
+ DeletedAt gorm.DeletedAt `gorm:"index"`
+
+ EndpointType params.EndpointType `gorm:"index:idx_endpoint_type"`
+
+ Description string `gorm:"type:text"`
+ APIBaseURL string `gorm:"type:text collate nocase"`
+ UploadBaseURL string `gorm:"type:text collate nocase"`
+ BaseURL string `gorm:"type:text collate nocase"`
+ CACertBundle []byte `gorm:"type:longblob"`
+}
+
+type GithubCredentials struct {
+ gorm.Model
+
+ Name string `gorm:"index:idx_github_credentials,unique;type:varchar(64) collate nocase"`
+ UserID *uuid.UUID `gorm:"index:idx_github_credentials,unique"`
+ User User `gorm:"foreignKey:UserID"`
+
+ Description string `gorm:"type:text"`
+ AuthType params.ForgeAuthType `gorm:"index"`
+ Payload []byte `gorm:"type:longblob"`
+
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName"`
+ EndpointName *string `gorm:"index"`
+
+ Repositories []Repository `gorm:"foreignKey:CredentialsID"`
+ Organizations []Organization `gorm:"foreignKey:CredentialsID"`
+ Enterprises []Enterprise `gorm:"foreignKey:CredentialsID"`
+}
+
+type GiteaCredentials struct {
+ gorm.Model
+
+ Name string `gorm:"index:idx_gitea_credentials,unique;type:varchar(64) collate nocase"`
+ UserID *uuid.UUID `gorm:"index:idx_gitea_credentials,unique"`
+ User User `gorm:"foreignKey:UserID"`
+
+ Description string `gorm:"type:text"`
+ AuthType params.ForgeAuthType `gorm:"index"`
+ Payload []byte `gorm:"type:longblob"`
+
+ Endpoint GithubEndpoint `gorm:"foreignKey:EndpointName"`
+ EndpointName *string `gorm:"index"`
+
+ Repositories []Repository `gorm:"foreignKey:GiteaCredentialsID"`
+ Organizations []Organization `gorm:"foreignKey:GiteaCredentialsID"`
}
diff --git a/database/sql/organizations.go b/database/sql/organizations.go
index f6bc2ed5..22be6272 100644
--- a/database/sql/organizations.go
+++ b/database/sql/organizations.go
@@ -16,324 +16,237 @@ package sql
import (
"context"
+ "errors"
"fmt"
+ "log/slog"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
- "gorm.io/datatypes"
+ "github.com/google/uuid"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) CreateOrganization(ctx context.Context, name, credentialsName, webhookSecret string) (params.Organization, error) {
+func (s *sqlDatabase) CreateOrganization(ctx context.Context, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Organization, err error) {
if webhookSecret == "" {
return params.Organization{}, errors.New("creating org: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
if err != nil {
- return params.Organization{}, fmt.Errorf("failed to encrypt string")
+ return params.Organization{}, fmt.Errorf("error encoding secret: %w", err)
}
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.OrganizationEntityType, common.CreateOperation, param)
+ }
+ }()
newOrg := Organization{
- Name: name,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
- q := s.conn.Create(&newOrg)
- if q.Error != nil {
- return params.Organization{}, errors.Wrap(q.Error, "creating org")
- }
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ switch credentials.ForgeType {
+ case params.GithubEndpointType:
+ newOrg.CredentialsID = &credentials.ID
+ case params.GiteaEndpointType:
+ newOrg.GiteaCredentialsID = &credentials.ID
+ default:
+ return fmt.Errorf("unsupported credentials type: %w", runnerErrors.ErrBadRequest)
+ }
- param, err := s.sqlToCommonOrganization(newOrg)
+ newOrg.EndpointName = &credentials.Endpoint.Name
+ q := tx.Create(&newOrg)
+ if q.Error != nil {
+ return fmt.Errorf("error creating org: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating org")
+ return params.Organization{}, fmt.Errorf("error creating org: %w", err)
}
- param.WebhookSecret = webhookSecret
- return param, nil
+ ret, err := s.GetOrganizationByID(ctx, newOrg.ID.String())
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error creating org: %w", err)
+ }
+
+ return ret, nil
}
-func (s *sqlDatabase) GetOrganization(ctx context.Context, name string) (params.Organization, error) {
- org, err := s.getOrg(ctx, name)
+func (s *sqlDatabase) GetOrganization(ctx context.Context, name, endpointName string) (params.Organization, error) {
+ org, err := s.getOrg(ctx, name, endpointName)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
- param, err := s.sqlToCommonOrganization(org)
+ param, err := s.sqlToCommonOrganization(org, true)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
+func (s *sqlDatabase) ListOrganizations(_ context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
var orgs []Organization
- q := s.conn.Find(&orgs)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint")
+
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&orgs)
if q.Error != nil {
- return []params.Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return []params.Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
ret := make([]params.Organization, len(orgs))
for idx, val := range orgs {
var err error
- ret[idx], err = s.sqlToCommonOrganization(val)
+ ret[idx], err = s.sqlToCommonOrganization(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
}
return ret, nil
}
-func (s *sqlDatabase) DeleteOrganization(ctx context.Context, orgID string) error {
- org, err := s.getOrgByID(ctx, orgID)
+func (s *sqlDatabase) DeleteOrganization(ctx context.Context, orgID string) (err error) {
+ org, err := s.getOrgByID(ctx, s.conn, orgID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching org")
+ return fmt.Errorf("error fetching org: %w", err)
}
+ defer func(org Organization) {
+ if err == nil {
+ asParam, innerErr := s.sqlToCommonOrganization(org, true)
+ if innerErr == nil {
+ s.sendNotify(common.OrganizationEntityType, common.DeleteOperation, asParam)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "org", orgID)
+ }
+ }
+ }(org)
+
q := s.conn.Unscoped().Delete(&org)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting org")
+ return fmt.Errorf("error deleting org: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateRepositoryParams) (params.Organization, error) {
- org, err := s.getOrgByID(ctx, orgID)
- if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
- }
-
- if param.CredentialsName != "" {
- org.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Organization{}, fmt.Errorf("saving org: failed to encrypt string: %w", err)
+func (s *sqlDatabase) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (paramOrg params.Organization, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.OrganizationEntityType, common.UpdateOperation, paramOrg)
+ }
+ }()
+ var org Organization
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ org, err = s.getOrgByID(ctx, tx, orgID)
+ if err != nil {
+ return fmt.Errorf("error fetching org: %w", err)
+ }
+ if org.EndpointName == nil {
+ return fmt.Errorf("error org has no endpoint: %w", runnerErrors.ErrUnprocessable)
}
- org.WebhookSecret = secret
- }
- q := s.conn.Save(&org)
- if q.Error != nil {
- return params.Organization{}, errors.Wrap(q.Error, "saving org")
- }
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return fmt.Errorf("error credentials have no endpoint: %w", runnerErrors.ErrUnprocessable)
+ }
- newParams, err := s.sqlToCommonOrganization(org)
+ if *creds.EndpointName != *org.EndpointName {
+ return fmt.Errorf("error endpoint mismatch: %w", runnerErrors.ErrBadRequest)
+ }
+ org.CredentialsID = &creds.ID
+ }
+
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("saving org: failed to encrypt string: %w", err)
+ }
+ org.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ org.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&org)
+ if q.Error != nil {
+ return fmt.Errorf("error saving org: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Organization{}, errors.Wrap(err, "saving org")
+ return params.Organization{}, fmt.Errorf("error saving org: %w", err)
}
- return newParams, nil
+
+ org, err = s.getOrgByID(ctx, s.conn, orgID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error updating enterprise: %w", err)
+ }
+ paramOrg, err = s.sqlToCommonOrganization(org, true)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error saving org: %w", err)
+ }
+ return paramOrg, nil
}
func (s *sqlDatabase) GetOrganizationByID(ctx context.Context, orgID string) (params.Organization, error) {
- org, err := s.getOrgByID(ctx, orgID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "GiteaCredentials",
+ "GiteaCredentials.Endpoint",
+ "Events",
+ }
+ org, err := s.getOrgByID(ctx, s.conn, orgID, preloadList...)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
- param, err := s.sqlToCommonOrganization(org)
+ param, err := s.sqlToCommonOrganization(org, true)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching enterprise")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) CreateOrganizationPool(ctx context.Context, orgId string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- org, err := s.getOrgByID(ctx, orgId)
+func (s *sqlDatabase) getOrgByID(_ context.Context, db *gorm.DB, id string, preload ...string) (Organization, error) {
+ u, err := uuid.Parse(id)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching org")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- OrgID: org.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getOrgPoolByUniqueFields(ctx, orgId, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool, error) {
- pools, err := s.getOrgPools(ctx, orgID, "Tags")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) GetOrganizationPool(ctx context.Context, orgID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteOrganizationPool(ctx context.Context, orgID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up org pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) FindOrganizationPoolByTags(ctx context.Context, orgID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(orgID, "org_id", tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (s *sqlDatabase) ListOrgInstances(ctx context.Context, orgID string) ([]params.Instance, error) {
- pools, err := s.getOrgPools(ctx, orgID, "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching org")
- }
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) UpdateOrganizationPool(ctx context.Context, orgID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.OrganizationPool, orgID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) getPoolByID(ctx context.Context, poolID string, preload ...string) (Pool, error) {
- u, err := uuid.FromString(poolID)
- if err != nil {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
- }
- var pool Pool
- q := s.conn.Model(&Pool{})
- if len(preload) > 0 {
- for _, item := range preload {
- q = q.Preload(item)
- }
- }
-
- q = q.Where("id = ?", u).First(&pool)
-
- if q.Error != nil {
- if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Pool{}, runnerErrors.ErrNotFound
- }
- return Pool{}, errors.Wrap(q.Error, "fetching org from database")
- }
- return pool, nil
-}
-
-func (s *sqlDatabase) getOrgPools(ctx context.Context, orgID string, preload ...string) ([]Pool, error) {
- _, err := s.getOrgByID(ctx, orgID)
- if err != nil {
- return nil, errors.Wrap(err, "fetching org")
- }
-
- q := s.conn
- if len(preload) > 0 {
- for _, item := range preload {
- q = q.Preload(item)
- }
- }
-
- var pools []Pool
- err = q.Model(&Pool{}).
- Where("org_id = ?", orgID).
- Omit("extra_specs").
- Find(&pools).Error
-
- if err != nil {
- return nil, errors.Wrap(err, "fetching pool")
- }
-
- return pools, nil
-}
-
-func (s *sqlDatabase) getOrgByID(ctx context.Context, id string, preload ...string) (Organization, error) {
- u, err := uuid.FromString(id)
- if err != nil {
- return Organization{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Organization{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
var org Organization
- q := s.conn
+ q := db
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -345,40 +258,26 @@ func (s *sqlDatabase) getOrgByID(ctx context.Context, id string, preload ...stri
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Organization{}, runnerErrors.ErrNotFound
}
- return Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
return org, nil
}
-func (s *sqlDatabase) getOrg(ctx context.Context, name string) (Organization, error) {
+func (s *sqlDatabase) getOrg(_ context.Context, name, endpointName string) (Organization, error) {
var org Organization
- q := s.conn.Where("name = ? COLLATE NOCASE", name)
- q = q.First(&org)
+ q := s.conn.Where("name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, endpointName).
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint").
+ First(&org)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Organization{}, runnerErrors.ErrNotFound
}
- return Organization{}, errors.Wrap(q.Error, "fetching org from database")
+ return Organization{}, fmt.Errorf("error fetching org from database: %w", q.Error)
}
return org, nil
}
-
-func (s *sqlDatabase) getOrgPoolByUniqueFields(ctx context.Context, orgID string, provider, image, flavor string) (Pool, error) {
- org, err := s.getOrgByID(ctx, orgID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching org")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&org).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
diff --git a/database/sql/organizations_test.go b/database/sql/organizations_test.go
index 1c3cd999..245b3c1f 100644
--- a/database/sql/organizations_test.go
+++ b/database/sql/organizations_test.go
@@ -22,16 +22,16 @@ import (
"sort"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type OrgTestFixtures struct {
@@ -39,7 +39,7 @@ type OrgTestFixtures struct {
CreateOrgParams params.CreateOrgParams
CreatePoolParams params.CreatePoolParams
CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
+ UpdateRepoParams params.UpdateEntityParams
UpdatePoolParams params.UpdatePoolParams
SQLMock sqlmock.Sqlmock
}
@@ -49,6 +49,15 @@ type OrgTestSuite struct {
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *OrgTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ testCredsGitea params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *OrgTestSuite) equalInstancesByName(expected, actual []params.Instance) {
@@ -71,23 +80,36 @@ func (s *OrgTestSuite) assertSQLMockExpectations() {
func (s *OrgTestSuite) SetupTest() {
// create testing sqlite database
- db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ dbConfig := garmTesting.GetTestSqliteDBConfig(s.T())
+ db, err := NewSQLDatabase(context.Background(), dbConfig)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.testCredsGitea = garmTesting.CreateTestGiteaCredentials(adminCtx, "new-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some organization objects in the database, for testing purposes
orgs := []params.Organization{}
for i := 1; i <= 3; i++ {
org, err := db.CreateOrganization(
- context.Background(),
+ s.adminCtx,
fmt.Sprintf("test-org-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-org-%d)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-org-%d): %q", i, err))
}
orgs = append(orgs, org)
@@ -104,7 +126,7 @@ func (s *OrgTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -113,7 +135,7 @@ func (s *OrgTestSuite) SetupTest() {
}
s.StoreSQLMocked = &sqlDatabase{
conn: gormConn,
- cfg: garmTesting.GetTestSqliteDBConfig(s.T()),
+ cfg: dbConfig,
}
// setup test fixtures
@@ -122,8 +144,8 @@ func (s *OrgTestSuite) SetupTest() {
fixtures := &OrgTestFixtures{
Orgs: orgs,
CreateOrgParams: params.CreateOrgParams{
- Name: "new-test-org",
- CredentialsName: "new-creds",
+ Name: s.testCreds.Name,
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "new-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -135,14 +157,14 @@ func (s *OrgTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "linux",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-update-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -159,20 +181,77 @@ func (s *OrgTestSuite) SetupTest() {
func (s *OrgTestSuite) TestCreateOrganization() {
// call tested function
org, err := s.Store.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
// assertions
s.Require().Nil(err)
- storeOrg, err := s.Store.GetOrganizationByID(context.Background(), org.ID)
+ storeOrg, err := s.Store.GetOrganizationByID(s.adminCtx, org.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get organization by id: %v", err))
}
s.Require().Equal(storeOrg.Name, org.Name)
- s.Require().Equal(storeOrg.CredentialsName, org.CredentialsName)
+ s.Require().Equal(storeOrg.Credentials.Name, org.Credentials.Name)
s.Require().Equal(storeOrg.WebhookSecret, org.WebhookSecret)
+
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeOrganization)
+ s.Require().Equal(entity.ID, org.ID)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GithubEndpointType)
+}
+
+func (s *OrgTestSuite) TestCreateOrgForGitea() {
+ // call tested function
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ s.testCredsGitea,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ // assertions
+ s.Require().Nil(err)
+ storeOrg, err := s.Store.GetOrganizationByID(s.adminCtx, org.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get organization by id: %v", err))
+ }
+ s.Require().Equal(storeOrg.Name, org.Name)
+ s.Require().Equal(storeOrg.Credentials.Name, org.Credentials.Name)
+ s.Require().Equal(storeOrg.WebhookSecret, org.WebhookSecret)
+
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeOrganization)
+ s.Require().Equal(entity.ID, org.ID)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GiteaEndpointType)
+}
+
+func (s *OrgTestSuite) TestCreateOrganizationInvalidForgeType() {
+ credentials := params.ForgeCredentials{
+ Name: "test-creds",
+ Endpoint: s.githubEndpoint,
+ ID: 99,
+ ForgeType: params.EndpointType("invalid-forge-type"),
+ }
+
+ _, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ credentials,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NotNil(err)
+ s.Require().Equal("error creating org: unsupported credentials type: invalid request", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationInvalidDBPassphrase() {
@@ -182,20 +261,21 @@ func (s *OrgTestSuite) TestCreateOrganizationInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+ cfg.Passphrase = wrongPassphrase // it must have a size different than 32
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
s.Require().NotNil(err)
- s.Require().Equal("failed to encrypt string", err.Error())
+ s.Require().Equal("error encoding secret: invalid passphrase length (expected length 32 characters)", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationDBCreateErr() {
@@ -206,18 +286,19 @@ func (s *OrgTestSuite) TestCreateOrganizationDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateOrganization(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateOrgParams.Name,
- s.Fixtures.CreateOrgParams.CredentialsName,
- s.Fixtures.CreateOrgParams.WebhookSecret)
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating org: creating org mock error", err.Error())
+ s.Require().Equal("error creating org: error creating org: creating org mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestGetOrganization() {
- org, err := s.Store.GetOrganization(context.Background(), s.Fixtures.Orgs[0].Name)
+ org, err := s.Store.GetOrganization(s.adminCtx, s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Orgs[0].Name, org.Name)
@@ -225,71 +306,127 @@ func (s *OrgTestSuite) TestGetOrganization() {
}
func (s *OrgTestSuite) TestGetOrganizationCaseInsensitive() {
- org, err := s.Store.GetOrganization(context.Background(), "TeSt-oRg-1")
+ org, err := s.Store.GetOrganization(s.adminCtx, "TeSt-oRg-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-org-1", org.Name)
}
func (s *OrgTestSuite) TestGetOrganizationNotFound() {
- _, err := s.Store.GetOrganization(context.Background(), "dummy-name")
+ _, err := s.Store.GetOrganization(s.adminCtx, "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE name = ? COLLATE NOCASE AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].Name).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE (name = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Orgs[0].Name))
- _, err := s.StoreSQLMocked.GetOrganization(context.Background(), s.Fixtures.Orgs[0].Name)
+ _, err := s.StoreSQLMocked.GetOrganization(s.adminCtx, s.Fixtures.Orgs[0].Name, s.Fixtures.Orgs[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching org: missing secret", err.Error())
+ s.Require().Equal("error fetching org: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestListOrganizations() {
- orgs, err := s.Store.ListOrganizations(context.Background())
+ orgs, err := s.Store.ListOrganizations(s.adminCtx, params.OrganizationFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), s.Fixtures.Orgs, orgs)
}
+func (s *OrgTestSuite) TestListOrganizationsWithFilters() {
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ org2, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org",
+ s.testCredsGitea,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ org3, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ "test-org2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ orgs, err := s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org, org2}, orgs)
+
+ orgs, err = s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org",
+ Endpoint: s.giteaEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org2}, orgs)
+
+ orgs, err = s.Store.ListOrganizations(
+ s.adminCtx,
+ params.OrganizationFilter{
+ Name: "test-org2",
+ })
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org3}, orgs)
+}
+
func (s *OrgTestSuite) TestListOrganizationsDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE `organizations`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListOrganizations(context.Background())
+ _, err := s.StoreSQLMocked.ListOrganizations(s.adminCtx, params.OrganizationFilter{})
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching org from database: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching org from database: fetching user from database mock error", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganization() {
- err := s.Store.DeleteOrganization(context.Background(), s.Fixtures.Orgs[0].ID)
+ err := s.Store.DeleteOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ _, err = s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationInvalidOrgID() {
- err := s.Store.DeleteOrganization(context.Background(), "dummy-org-id")
+ err := s.Store.DeleteOrganization(s.adminCtx, "dummy-org-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationDBDeleteErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -298,114 +435,153 @@ func (s *OrgTestSuite) TestDeleteOrganizationDBDeleteErr() {
WillReturnError(fmt.Errorf("mocked delete org error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteOrganization(context.Background(), s.Fixtures.Orgs[0].ID)
+ err := s.StoreSQLMocked.DeleteOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting org: mocked delete org error", err.Error())
+ s.Require().Equal("error deleting org: mocked delete org error", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganization() {
- org, err := s.Store.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ org, err := s.Store.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
}
func (s *OrgTestSuite) TestUpdateOrganizationInvalidOrgID() {
- _, err := s.Store.UpdateOrganization(context.Background(), "dummy-org-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateOrganization(s.adminCtx, "dummy-org-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error saving org: error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving org: saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestUpdateOrganizationDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `organizations` SET")).
WillReturnError(fmt.Errorf("saving org mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: saving org mock error", err.Error())
+ s.Require().Equal("error saving org: error saving org: saving org mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestUpdateOrganizationDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Orgs[0].ID, s.Fixtures.Orgs[0].Endpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateOrganization(s.adminCtx, s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving org: saving org: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestGetOrganizationByID() {
- org, err := s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ org, err := s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Orgs[0].ID, org.ID)
}
func (s *OrgTestSuite) TestGetOrganizationByIDInvalidOrgID() {
- _, err := s.Store.GetOrganizationByID(context.Background(), "dummy-org-id")
+ _, err := s.Store.GetOrganizationByID(s.adminCtx, "dummy-org-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching org: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organization_events` WHERE `organization_events`.`org_id` = ? AND `organization_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Orgs[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"org_id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Orgs[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"org_id"}).AddRow(s.Fixtures.Orgs[0].ID))
- _, err := s.StoreSQLMocked.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ _, err := s.StoreSQLMocked.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching enterprise: missing secret", err.Error())
+ s.Require().Equal("error fetching org: missing secret", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- org, err := s.Store.GetOrganizationByID(context.Background(), s.Fixtures.Orgs[0].ID)
+ org, err := s.Store.GetOrganizationByID(s.adminCtx, s.Fixtures.Orgs[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get org by ID: %v", err))
}
@@ -418,216 +594,120 @@ func (s *OrgTestSuite) TestCreateOrganizationPool() {
func (s *OrgTestSuite) TestCreateOrganizationPoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPoolInvalidOrgID() {
- _, err := s.Store.CreateOrganizationPool(context.Background(), "dummy-org-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: parsing id: invalid request", err.Error())
-}
-
-func (s *OrgTestSuite) TestCreateOrganizationPoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *OrgTestSuite) TestCreateOrganizationDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal(runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider"), err)
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Orgs[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Orgs[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -639,161 +719,165 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *OrgTestSuite) TestListOrgPools() {
orgPools := []params.Pool{}
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%v", i)
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
orgPools = append(orgPools, pool)
}
-
- pools, err := s.Store.ListOrgPools(context.Background(), s.Fixtures.Orgs[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), orgPools, pools)
}
func (s *OrgTestSuite) TestListOrgPoolsInvalidOrgID() {
- _, err := s.Store.ListOrgPools(context.Background(), "dummy-org-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestGetOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- orgPool, err := s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ orgPool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(orgPool.ID, pool.ID)
}
func (s *OrgTestSuite) TestGetOrganizationPoolInvalidOrgID() {
- _, err := s.Store.GetOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- err = s.Store.DeleteOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolInvalidOrgID() {
- err := s.Store.DeleteOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up org pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolDBDeleteErr() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Orgs[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"org_id", "id"}).AddRow(s.Fixtures.Orgs[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and org_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Orgs[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID)
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *OrgTestSuite) TestFindOrganizationPoolByTags() {
- orgPool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
- }
-
- pool, err := s.Store.FindOrganizationPoolByTags(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams.Tags)
-
- s.Require().Nil(err)
- s.Require().Equal(orgPool.ID, pool.ID)
- s.Require().Equal(orgPool.Image, pool.Image)
- s.Require().Equal(orgPool.Flavor, pool.Flavor)
-}
-
-func (s *OrgTestSuite) TestFindOrganizationPoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindOrganizationPoolByTags(context.Background(), s.Fixtures.Orgs[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}
func (s *OrgTestSuite) TestListOrgInstances() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-org-%v", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListOrgInstances(context.Background(), s.Fixtures.Orgs[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByName(poolInstances, instances)
}
func (s *OrgTestSuite) TestListOrgInstancesInvalidOrgID() {
- _, err := s.Store.ListOrgInstances(context.Background(), "dummy-org-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching org: fetching org: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationPool() {
- pool, err := s.Store.CreateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Orgs[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
- pool, err = s.Store.UpdateOrganizationPool(context.Background(), s.Fixtures.Orgs[0].ID, pool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err = s.Store.UpdateEntityPool(s.adminCtx, entity, pool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -802,11 +886,37 @@ func (s *OrgTestSuite) TestUpdateOrganizationPool() {
s.Require().Equal(s.Fixtures.UpdatePoolParams.Flavor, pool.Flavor)
}
+func (s *OrgTestSuite) TestAddOrgEntityEvent() {
+ org, err := s.Store.CreateOrganization(
+ s.adminCtx,
+ s.Fixtures.CreateOrgParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateOrgParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ org, err = s.Store.GetOrganizationByID(s.adminCtx, org.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(org.Events))
+ s.Require().Equal(params.StatusEvent, org.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, org.Events[0].EventLevel)
+ s.Require().Equal("this is a test", org.Events[0].Message)
+}
+
func (s *OrgTestSuite) TestUpdateOrganizationPoolInvalidOrgID() {
- _, err := s.Store.UpdateOrganizationPool(context.Background(), "dummy-org-id", "dummy-pool-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-org-id",
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
}
func TestOrgTestSuite(t *testing.T) {
diff --git a/database/sql/pools.go b/database/sql/pools.go
index 35e704a1..e86087ad 100644
--- a/database/sql/pools.go
+++ b/database/sql/pools.go
@@ -16,99 +16,450 @@ package sql
import (
"context"
+ "errors"
"fmt"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
+ "gorm.io/datatypes"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) ListAllPools(ctx context.Context) ([]params.Pool, error) {
+const (
+ entityTypeEnterpriseName = "enterprise_id"
+ entityTypeOrgName = "org_id"
+ entityTypeRepoName = "repo_id"
+)
+
+func (s *sqlDatabase) ListAllPools(_ context.Context) ([]params.Pool, error) {
var pools []Pool
- q := s.conn.Model(&Pool{}).
+ q := s.conn.
Preload("Tags").
Preload("Organization").
+ Preload("Organization.Endpoint").
Preload("Repository").
+ Preload("Repository.Endpoint").
Preload("Enterprise").
+ Preload("Enterprise.Endpoint").
Omit("extra_specs").
Find(&pools)
if q.Error != nil {
- return nil, errors.Wrap(q.Error, "fetching all pools")
+ return nil, fmt.Errorf("error fetching all pools: %w", q.Error)
}
ret := make([]params.Pool, len(pools))
+ var err error
for idx, val := range pools {
- ret[idx] = s.sqlToCommonPool(val)
+ ret[idx], err = s.sqlToCommonPool(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) GetPoolByID(ctx context.Context, poolID string) (params.Pool, error) {
- pool, err := s.getPoolByID(ctx, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool by ID")
+func (s *sqlDatabase) GetPoolByID(_ context.Context, poolID string) (params.Pool, error) {
+ preloadList := []string{
+ "Tags",
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
}
- return s.sqlToCommonPool(pool), nil
+ pool, err := s.getPoolByID(s.conn, poolID, preloadList...)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool by ID: %w", err)
+ }
+ return s.sqlToCommonPool(pool)
}
-func (s *sqlDatabase) DeletePoolByID(ctx context.Context, poolID string) error {
- pool, err := s.getPoolByID(ctx, poolID)
+func (s *sqlDatabase) DeletePoolByID(_ context.Context, poolID string) (err error) {
+ pool, err := s.getPoolByID(s.conn, poolID)
if err != nil {
- return errors.Wrap(err, "fetching pool by ID")
+ return fmt.Errorf("error fetching pool by ID: %w", err)
}
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.DeleteOperation, params.Pool{ID: poolID})
+ }
+ }()
+
if q := s.conn.Unscoped().Delete(&pool); q.Error != nil {
- return errors.Wrap(q.Error, "removing pool")
+ return fmt.Errorf("error removing pool: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) getEntityPool(ctx context.Context, entityType params.PoolType, entityID, poolID string, preload ...string) (Pool, error) {
+func (s *sqlDatabase) getEntityPool(tx *gorm.DB, entityType params.ForgeEntityType, entityID, poolID string, preload ...string) (Pool, error) {
if entityID == "" {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "missing entity id")
+ return Pool{}, fmt.Errorf("error missing entity id: %w", runnerErrors.ErrBadRequest)
}
- u, err := uuid.FromString(poolID)
+ u, err := uuid.Parse(poolID)
if err != nil {
- return Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
}
- q := s.conn
+ var fieldName string
+ var entityField string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ entityField = repositoryFieldName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ entityField = organizationFieldName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ entityField = enterpriseFieldName
+ default:
+ return Pool{}, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(entityField)
if len(preload) > 0 {
for _, item := range preload {
q = q.Preload(item)
}
}
- var fieldName string
- switch entityType {
- case params.RepositoryPool:
- fieldName = "repo_id"
- case params.OrganizationPool:
- fieldName = "org_id"
- case params.EnterprisePool:
- fieldName = "enterprise_id"
- default:
- return Pool{}, fmt.Errorf("invalid entityType: %v", entityType)
- }
-
var pool Pool
condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
err = q.Model(&Pool{}).
Where(condition, u, entityID).
First(&pool).Error
-
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
- return Pool{}, errors.Wrap(runnerErrors.ErrNotFound, "finding pool")
+ return Pool{}, fmt.Errorf("error finding pool: %w", runnerErrors.ErrNotFound)
}
- return Pool{}, errors.Wrap(err, "fetching pool")
+ return Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
+
+func (s *sqlDatabase) listEntityPools(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, preload ...string) ([]Pool, error) {
+ if _, err := uuid.Parse(entityID); err != nil {
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if err := s.hasGithubEntity(tx, entityType, entityID); err != nil {
+ return nil, fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ var preloadEntity string
+ var fieldName string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ preloadEntity = "Repository"
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ preloadEntity = "Organization"
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ preloadEntity = "Enterprise"
+ default:
+ return nil, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(preloadEntity)
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ var pools []Pool
+ condition := fmt.Sprintf("%s = ?", fieldName)
+ err := q.Model(&Pool{}).
+ Where(condition, entityID).
+ Omit("extra_specs").
+ Find(&pools).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return []Pool{}, nil
+ }
+ return nil, fmt.Errorf("error fetching pool: %w", err)
+ }
+
+ return pools, nil
+}
+
+func (s *sqlDatabase) findPoolByTags(id string, poolType params.ForgeEntityType, tags []string) ([]params.Pool, error) {
+ if len(tags) == 0 {
+ return nil, runnerErrors.NewBadRequestError("missing tags")
+ }
+ u, err := uuid.Parse(id)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ var fieldName string
+ switch poolType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ default:
+ return nil, fmt.Errorf("invalid poolType: %v", poolType)
+ }
+
+ var pools []Pool
+ where := fmt.Sprintf("tags.name COLLATE NOCASE in ? and %s = ? and enabled = true", fieldName)
+ q := s.conn.Joins("JOIN pool_tags on pool_tags.pool_id=pools.id").
+ Joins("JOIN tags on tags.id=pool_tags.tag_id").
+ Group("pools.id").
+ Preload("Tags").
+ Having("count(1) = ?", len(tags)).
+ Where(where, tags, u).
+ Order("priority desc").
+ Find(&pools)
+
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return nil, runnerErrors.ErrNotFound
+ }
+ return nil, fmt.Errorf("error fetching pool: %w", q.Error)
+ }
+
+ if len(pools) == 0 {
+ return nil, runnerErrors.ErrNotFound
+ }
+
+ ret := make([]params.Pool, len(pools))
+ for idx, val := range pools {
+ ret[idx], err = s.sqlToCommonPool(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting pool: %w", err)
+ }
+ }
+
+ return ret, nil
+}
+
+func (s *sqlDatabase) FindPoolsMatchingAllTags(_ context.Context, entityType params.ForgeEntityType, entityID string, tags []string) ([]params.Pool, error) {
+ if len(tags) == 0 {
+ return nil, runnerErrors.NewBadRequestError("missing tags")
+ }
+
+ pools, err := s.findPoolByTags(entityID, entityType, tags)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ return []params.Pool{}, nil
+ }
+ return nil, fmt.Errorf("error fetching pools: %w", err)
+ }
+
+ return pools, nil
+}
+
+func (s *sqlDatabase) CreateEntityPool(ctx context.Context, entity params.ForgeEntity, param params.CreatePoolParams) (pool params.Pool, err error) {
+ if len(param.Tags) == 0 {
+ return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.CreateOperation, pool)
+ }
+ }()
+
+ newPool := Pool{
+ ProviderName: param.ProviderName,
+ MaxRunners: param.MaxRunners,
+ MinIdleRunners: param.MinIdleRunners,
+ RunnerPrefix: param.GetRunnerPrefix(),
+ Image: param.Image,
+ Flavor: param.Flavor,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ Enabled: param.Enabled,
+ RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ Priority: param.Priority,
+ }
+ if len(param.ExtraSpecs) > 0 {
+ newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ newPool.RepoID = &entityID
+ case params.ForgeEntityTypeOrganization:
+ newPool.OrgID = &entityID
+ case params.ForgeEntityTypeEnterprise:
+ newPool.EnterpriseID = &entityID
+ }
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := s.hasGithubEntity(tx, entity.EntityType, entity.ID); err != nil {
+ return fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ tags := []Tag{}
+ for _, val := range param.Tags {
+ t, err := s.getOrCreateTag(tx, val)
+ if err != nil {
+ return fmt.Errorf("error creating tag: %w", err)
+ }
+ tags = append(tags, t)
+ }
+
+ q := tx.Create(&newPool)
+ if q.Error != nil {
+ return fmt.Errorf("error creating pool: %w", q.Error)
+ }
+
+ for i := range tags {
+ if err := tx.Model(&newPool).Association("Tags").Append(&tags[i]); err != nil {
+ return fmt.Errorf("error associating tags: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return params.Pool{}, err
+ }
+
+ return s.GetPoolByID(ctx, newPool.ID.String())
+}
+
+func (s *sqlDatabase) GetEntityPool(_ context.Context, entity params.ForgeEntity, poolID string) (params.Pool, error) {
+ preloadList := []string{
+ "Tags",
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
+ }
+ pool, err := s.getEntityPool(s.conn, entity.EntityType, entity.ID, poolID, preloadList...)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("fetching pool: %w", err)
+ }
+ return s.sqlToCommonPool(pool)
+}
+
+func (s *sqlDatabase) DeleteEntityPool(_ context.Context, entity params.ForgeEntity, poolID string) (err error) {
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ defer func() {
+ if err == nil {
+ pool := params.Pool{
+ ID: poolID,
+ }
+ s.sendNotify(common.PoolEntityType, common.DeleteOperation, pool)
+ }
+ }()
+
+ poolUUID, err := uuid.Parse(poolID)
+ if err != nil {
+ return fmt.Errorf("error parsing pool id: %w", runnerErrors.ErrBadRequest)
+ }
+ var fieldName string
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ default:
+ return fmt.Errorf("invalid entityType: %v", entity.EntityType)
+ }
+ condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
+ if err := s.conn.Unscoped().Where(condition, poolUUID, entityID).Delete(&Pool{}).Error; err != nil {
+ return fmt.Errorf("error removing pool: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) UpdateEntityPool(ctx context.Context, entity params.ForgeEntity, poolID string, param params.UpdatePoolParams) (updatedPool params.Pool, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.PoolEntityType, common.UpdateOperation, updatedPool)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ pool, err := s.getEntityPool(tx, entity.EntityType, entity.ID, poolID, "Tags", "Instances")
+ if err != nil {
+ return fmt.Errorf("error fetching pool: %w", err)
+ }
+
+ updatedPool, err = s.updatePool(tx, pool, param)
+ if err != nil {
+ return fmt.Errorf("error updating pool: %w", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.Pool{}, err
+ }
+
+ updatedPool, err = s.GetPoolByID(ctx, poolID)
+ if err != nil {
+ return params.Pool{}, err
+ }
+ return updatedPool, nil
+}
+
+func (s *sqlDatabase) ListEntityPools(_ context.Context, entity params.ForgeEntity) ([]params.Pool, error) {
+ pools, err := s.listEntityPools(s.conn, entity.EntityType, entity.ID, "Tags")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pools: %w", err)
+ }
+
+ ret := make([]params.Pool, len(pools))
+ for idx, pool := range pools {
+ ret[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pool: %w", err)
+ }
+ }
+
+ return ret, nil
+}
+
+func (s *sqlDatabase) ListEntityInstances(_ context.Context, entity params.ForgeEntity) ([]params.Instance, error) {
+ pools, err := s.listEntityPools(s.conn, entity.EntityType, entity.ID, "Instances", "Instances.Job")
+ if err != nil {
+ return nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+ ret := []params.Instance{}
+ for _, pool := range pools {
+ instances := pool.Instances
+ pool.Instances = nil
+ for _, instance := range instances {
+ instance.Pool = pool
+ paramsInstance, err := s.sqlToParamsInstance(instance)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching instance: %w", err)
+ }
+ ret = append(ret, paramsInstance)
+ }
+ }
+ return ret, nil
+}
diff --git a/database/sql/pools_test.go b/database/sql/pools_test.go
index 69039813..297f4cdf 100644
--- a/database/sql/pools_test.go
+++ b/database/sql/pools_test.go
@@ -16,20 +16,23 @@ package sql
import (
"context"
+ "encoding/json"
"flag"
"fmt"
"regexp"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type PoolsTestFixtures struct {
@@ -40,9 +43,12 @@ type PoolsTestFixtures struct {
type PoolsTestSuite struct {
suite.Suite
- Store dbCommon.Store
+ Store dbCommon.Store
+ ctx context.Context
+
StoreSQLMocked *sqlDatabase
Fixtures *PoolsTestFixtures
+ adminCtx context.Context
}
func (s *PoolsTestSuite) assertSQLMockExpectations() {
@@ -52,26 +58,42 @@ func (s *PoolsTestSuite) assertSQLMockExpectations() {
}
}
+func (s *PoolsTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *PoolsTestSuite) SetupTest() {
// create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
+
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ s.ctx = garmTesting.ImpersonateAdminContext(ctx, s.Store, s.T())
+
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
// create an organization for testing purposes
- org, err := s.Store.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := s.Store.CreateOrganization(s.adminCtx, "test-org", creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
+ entity, err := org.GetEntity()
+ s.Require().Nil(err)
// create some pool objects in the database, for testing purposes
orgPools := []params.Pool{}
for i := 1; i <= 3; i++ {
- pool, err := db.CreateOrganizationPool(
- context.Background(),
- org.ID,
+ pool, err := db.CreateEntityPool(
+ s.adminCtx,
+ entity,
params.CreatePoolParams{
ProviderName: "test-provider",
MaxRunners: 4,
@@ -79,7 +101,7 @@ func (s *PoolsTestSuite) SetupTest() {
Image: fmt.Sprintf("test-image-%d", i),
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
},
)
if err != nil {
@@ -99,7 +121,7 @@ func (s *PoolsTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -120,7 +142,7 @@ func (s *PoolsTestSuite) SetupTest() {
}
func (s *PoolsTestSuite) TestListAllPools() {
- pools, err := s.Store.ListAllPools(context.Background())
+ pools, err := s.Store.ListAllPools(s.adminCtx)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), s.Fixtures.Pools, pools)
@@ -128,49 +150,49 @@ func (s *PoolsTestSuite) TestListAllPools() {
func (s *PoolsTestSuite) TestListAllPoolsDBFetchErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT `pools`.`id`,`pools`.`created_at`,`pools`.`updated_at`,`pools`.`deleted_at`,`pools`.`provider_name`,`pools`.`runner_prefix`,`pools`.`max_runners`,`pools`.`min_idle_runners`,`pools`.`runner_bootstrap_timeout`,`pools`.`image`,`pools`.`flavor`,`pools`.`os_type`,`pools`.`os_arch`,`pools`.`enabled`,`pools`.`git_hub_runner_group`,`pools`.`repo_id`,`pools`.`org_id`,`pools`.`enterprise_id` FROM `pools` WHERE `pools`.`deleted_at` IS NULL")).
+ ExpectQuery(regexp.QuoteMeta("SELECT `pools`.`id`,`pools`.`created_at`,`pools`.`updated_at`,`pools`.`deleted_at`,`pools`.`provider_name`,`pools`.`runner_prefix`,`pools`.`max_runners`,`pools`.`min_idle_runners`,`pools`.`runner_bootstrap_timeout`,`pools`.`image`,`pools`.`flavor`,`pools`.`os_type`,`pools`.`os_arch`,`pools`.`enabled`,`pools`.`git_hub_runner_group`,`pools`.`repo_id`,`pools`.`org_id`,`pools`.`enterprise_id`,`pools`.`priority` FROM `pools` WHERE `pools`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("mocked fetching all pools error"))
- _, err := s.StoreSQLMocked.ListAllPools(context.Background())
+ _, err := s.StoreSQLMocked.ListAllPools(s.adminCtx)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching all pools: mocked fetching all pools error", err.Error())
+ s.Require().Equal("error fetching all pools: mocked fetching all pools error", err.Error())
}
func (s *PoolsTestSuite) TestGetPoolByID() {
- pool, err := s.Store.GetPoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ pool, err := s.Store.GetPoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Pools[0].ID, pool.ID)
}
func (s *PoolsTestSuite) TestGetPoolByIDInvalidPoolID() {
- _, err := s.Store.GetPoolByID(context.Background(), "dummy-pool-id")
+ _, err := s.Store.GetPoolByID(s.adminCtx, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByID() {
- err := s.Store.DeletePoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ err := s.Store.DeletePoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetPoolByID(context.Background(), s.Fixtures.Pools[0].ID)
- s.Require().Equal("fetching pool by ID: not found", err.Error())
+ _, err = s.Store.GetPoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByIDInvalidPoolID() {
- err := s.Store.DeletePoolByID(context.Background(), "dummy-pool-id")
+ err := s.Store.DeletePoolByID(s.adminCtx, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolsTestSuite) TestDeletePoolByIDDBRemoveErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1 ")).
- WithArgs(s.Fixtures.Pools[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Pools[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Pools[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -178,11 +200,137 @@ func (s *PoolsTestSuite) TestDeletePoolByIDDBRemoveErr() {
WillReturnError(fmt.Errorf("mocked removing pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeletePoolByID(context.Background(), s.Fixtures.Pools[0].ID)
+ err := s.StoreSQLMocked.DeletePoolByID(s.adminCtx, s.Fixtures.Pools[0].ID)
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("removing pool: mocked removing pool error", err.Error())
+ s.Require().Equal("error removing pool: mocked removing pool error", err.Error())
+}
+
+func (s *PoolsTestSuite) TestEntityPoolOperations() {
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.Store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.Store, s.T(), ep)
+ s.T().Cleanup(func() { s.Store.DeleteGithubCredentials(s.ctx, creds.ID) })
+ repo, err := s.Store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.Store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+
+ pool, err := s.Store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+ s.T().Cleanup(func() { s.Store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+ entityPool, err := s.Store.GetEntityPool(s.ctx, entity, pool.ID)
+ s.Require().NoError(err)
+ s.Require().Equal(pool.ID, entityPool.ID)
+ s.Require().Equal(pool.ProviderName, entityPool.ProviderName)
+
+ updatePoolParams := params.UpdatePoolParams{
+ Enabled: garmTesting.Ptr(true),
+ Flavor: "new-flavor",
+ Image: "new-image",
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: "new-prefix",
+ },
+ MaxRunners: garmTesting.Ptr(uint(100)),
+ MinIdleRunners: garmTesting.Ptr(uint(50)),
+ OSType: commonParams.Windows,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"new-tag"},
+ RunnerBootstrapTimeout: garmTesting.Ptr(uint(10)),
+ ExtraSpecs: json.RawMessage(`{"extra": "specs"}`),
+ GitHubRunnerGroup: garmTesting.Ptr("new-group"),
+ Priority: garmTesting.Ptr(uint(1)),
+ }
+ pool, err = s.Store.UpdateEntityPool(s.ctx, entity, pool.ID, updatePoolParams)
+ s.Require().NoError(err)
+ s.Require().Equal(*updatePoolParams.Enabled, pool.Enabled)
+ s.Require().Equal(updatePoolParams.Flavor, pool.Flavor)
+ s.Require().Equal(updatePoolParams.Image, pool.Image)
+ s.Require().Equal(updatePoolParams.RunnerPrefix.Prefix, pool.RunnerPrefix.Prefix)
+ s.Require().Equal(*updatePoolParams.MaxRunners, pool.MaxRunners)
+ s.Require().Equal(*updatePoolParams.MinIdleRunners, pool.MinIdleRunners)
+ s.Require().Equal(updatePoolParams.OSType, pool.OSType)
+ s.Require().Equal(updatePoolParams.OSArch, pool.OSArch)
+ s.Require().Equal(*updatePoolParams.RunnerBootstrapTimeout, pool.RunnerBootstrapTimeout)
+ s.Require().Equal(updatePoolParams.ExtraSpecs, pool.ExtraSpecs)
+ s.Require().Equal(*updatePoolParams.GitHubRunnerGroup, pool.GitHubRunnerGroup)
+ s.Require().Equal(*updatePoolParams.Priority, pool.Priority)
+
+ entityPools, err := s.Store.ListEntityPools(s.ctx, entity)
+ s.Require().NoError(err)
+ s.Require().Len(entityPools, 1)
+ s.Require().Equal(pool.ID, entityPools[0].ID)
+
+ tagsToMatch := []string{"new-tag"}
+ pools, err := s.Store.FindPoolsMatchingAllTags(s.ctx, entity.EntityType, entity.ID, tagsToMatch)
+ s.Require().NoError(err)
+ s.Require().Len(pools, 1)
+ s.Require().Equal(pool.ID, pools[0].ID)
+
+ invalidTagsToMatch := []string{"invalid-tag"}
+ pools, err = s.Store.FindPoolsMatchingAllTags(s.ctx, entity.EntityType, entity.ID, invalidTagsToMatch)
+ s.Require().NoError(err)
+ s.Require().Len(pools, 0)
+}
+
+func (s *PoolsTestSuite) TestListEntityInstances() {
+ ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.Store, s.T())
+ creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.Store, s.T(), ep)
+ s.T().Cleanup(func() { s.Store.DeleteGithubCredentials(s.ctx, creds.ID) })
+ repo, err := s.Store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repo.ID)
+ s.T().Cleanup(func() { s.Store.DeleteRepository(s.ctx, repo.ID) })
+
+ entity, err := repo.GetEntity()
+ s.Require().NoError(err)
+
+ createPoolParams := params.CreatePoolParams{
+ ProviderName: "test-provider",
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Tags: []string{"test-tag"},
+ }
+
+ pool, err := s.Store.CreateEntityPool(s.ctx, entity, createPoolParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(pool.ID)
+ s.T().Cleanup(func() { s.Store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+ createInstanceParams := params.CreateInstanceParams{
+ Name: "test-instance",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ Status: commonParams.InstanceCreating,
+ }
+ instance, err := s.Store.CreateInstance(s.ctx, pool.ID, createInstanceParams)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instance.ID)
+
+ s.T().Cleanup(func() { s.Store.DeleteInstance(s.ctx, pool.ID, instance.ID) })
+
+ instances, err := s.Store.ListEntityInstances(s.ctx, entity)
+ s.Require().NoError(err)
+ s.Require().Len(instances, 1)
+ s.Require().Equal(instance.ID, instances[0].ID)
+ s.Require().Equal(instance.Name, instances[0].Name)
+ s.Require().Equal(instance.ProviderName, pool.ProviderName)
}
func TestPoolsTestSuite(t *testing.T) {
diff --git a/database/sql/repositories.go b/database/sql/repositories.go
index 33e6011a..72b535e8 100644
--- a/database/sql/repositories.go
+++ b/database/sql/repositories.go
@@ -16,271 +16,241 @@ package sql
import (
"context"
+ "errors"
"fmt"
+ "log/slog"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
- "gorm.io/datatypes"
+ "github.com/google/uuid"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) CreateRepository(ctx context.Context, owner, name, credentialsName, webhookSecret string) (params.Repository, error) {
+func (s *sqlDatabase) CreateRepository(ctx context.Context, owner, name string, credentials params.ForgeCredentials, webhookSecret string, poolBalancerType params.PoolBalancerType) (param params.Repository, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.RepositoryEntityType, common.CreateOperation, param)
+ }
+ }()
+
if webhookSecret == "" {
return params.Repository{}, errors.New("creating repo: missing secret")
}
- secret, err := util.Aes256EncodeString(webhookSecret, s.cfg.Passphrase)
+ secret, err := util.Seal([]byte(webhookSecret), []byte(s.cfg.Passphrase))
if err != nil {
return params.Repository{}, fmt.Errorf("failed to encrypt string")
}
+
newRepo := Repository{
- Name: name,
- Owner: owner,
- WebhookSecret: secret,
- CredentialsName: credentialsName,
+ Name: name,
+ Owner: owner,
+ WebhookSecret: secret,
+ PoolBalancerType: poolBalancerType,
}
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ switch credentials.ForgeType {
+ case params.GithubEndpointType:
+ newRepo.CredentialsID = &credentials.ID
+ case params.GiteaEndpointType:
+ newRepo.GiteaCredentialsID = &credentials.ID
+ default:
+ return runnerErrors.NewBadRequestError("unsupported credentials type")
+ }
- q := s.conn.Create(&newRepo)
- if q.Error != nil {
- return params.Repository{}, errors.Wrap(q.Error, "creating repository")
- }
-
- param, err := s.sqlToCommonRepository(newRepo)
+ newRepo.EndpointName = &credentials.Endpoint.Name
+ q := tx.Create(&newRepo)
+ if q.Error != nil {
+ return fmt.Errorf("error creating repository: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repository")
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
+ }
+
+ ret, err := s.GetRepositoryByID(ctx, newRepo.ID.String())
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
+ }
+
+ return ret, nil
+}
+
+func (s *sqlDatabase) GetRepository(ctx context.Context, owner, name, endpointName string) (params.Repository, error) {
+ repo, err := s.getRepo(ctx, owner, name, endpointName)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ param, err := s.sqlToCommonRepository(repo, true)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) GetRepository(ctx context.Context, owner, name string) (params.Repository, error) {
- repo, err := s.getRepo(ctx, owner, name)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- param, err := s.sqlToCommonRepository(repo)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- return param, nil
-}
-
-func (s *sqlDatabase) ListRepositories(ctx context.Context) ([]params.Repository, error) {
+func (s *sqlDatabase) ListRepositories(_ context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
var repos []Repository
- q := s.conn.Find(&repos)
+ q := s.conn.
+ Preload("Credentials").
+ Preload("GiteaCredentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint")
+ if filter.Owner != "" {
+ q = q.Where("owner = ?", filter.Owner)
+ }
+ if filter.Name != "" {
+ q = q.Where("name = ?", filter.Name)
+ }
+ if filter.Endpoint != "" {
+ q = q.Where("endpoint_name = ?", filter.Endpoint)
+ }
+ q = q.Find(&repos)
if q.Error != nil {
- return []params.Repository{}, errors.Wrap(q.Error, "fetching user from database")
+ return []params.Repository{}, fmt.Errorf("error fetching user from database: %w", q.Error)
}
ret := make([]params.Repository, len(repos))
for idx, val := range repos {
var err error
- ret[idx], err = s.sqlToCommonRepository(val)
+ ret[idx], err = s.sqlToCommonRepository(val, true)
if err != nil {
- return nil, errors.Wrap(err, "fetching repositories")
+ return nil, fmt.Errorf("error fetching repositories: %w", err)
}
}
return ret, nil
}
-func (s *sqlDatabase) DeleteRepository(ctx context.Context, repoID string) error {
- repo, err := s.getRepoByID(ctx, repoID)
+func (s *sqlDatabase) DeleteRepository(ctx context.Context, repoID string) (err error) {
+ repo, err := s.getRepoByID(ctx, s.conn, repoID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
if err != nil {
- return errors.Wrap(err, "fetching repo")
+ return fmt.Errorf("error fetching repo: %w", err)
}
+ defer func(repo Repository) {
+ if err == nil {
+ asParam, innerErr := s.sqlToCommonRepository(repo, true)
+ if innerErr == nil {
+ s.sendNotify(common.RepositoryEntityType, common.DeleteOperation, asParam)
+ } else {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "error sending delete notification", "repo", repoID)
+ }
+ }
+ }(repo)
+
q := s.conn.Unscoped().Delete(&repo)
if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting repo")
+ return fmt.Errorf("error deleting repo: %w", q.Error)
}
return nil
}
-func (s *sqlDatabase) UpdateRepository(ctx context.Context, repoID string, param params.UpdateRepositoryParams) (params.Repository, error) {
- repo, err := s.getRepoByID(ctx, repoID)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
- }
-
- if param.CredentialsName != "" {
- repo.CredentialsName = param.CredentialsName
- }
-
- if param.WebhookSecret != "" {
- secret, err := util.Aes256EncodeString(param.WebhookSecret, s.cfg.Passphrase)
- if err != nil {
- return params.Repository{}, fmt.Errorf("saving repo: failed to encrypt string: %w", err)
+func (s *sqlDatabase) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (newParams params.Repository, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.RepositoryEntityType, common.UpdateOperation, newParams)
+ }
+ }()
+ var repo Repository
+ var creds GithubCredentials
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ var err error
+ repo, err = s.getRepoByID(ctx, tx, repoID)
+ if err != nil {
+ return fmt.Errorf("error fetching repo: %w", err)
+ }
+ if repo.EndpointName == nil {
+ return runnerErrors.NewUnprocessableError("repository has no endpoint")
}
- repo.WebhookSecret = secret
- }
- q := s.conn.Save(&repo)
- if q.Error != nil {
- return params.Repository{}, errors.Wrap(q.Error, "saving repo")
- }
+ if param.CredentialsName != "" {
+ creds, err = s.getGithubCredentialsByName(ctx, tx, param.CredentialsName, false)
+ if err != nil {
+ return fmt.Errorf("error fetching credentials: %w", err)
+ }
+ if creds.EndpointName == nil {
+ return runnerErrors.NewUnprocessableError("credentials have no endpoint")
+ }
- newParams, err := s.sqlToCommonRepository(repo)
+ if *creds.EndpointName != *repo.EndpointName {
+ return runnerErrors.NewBadRequestError("endpoint mismatch")
+ }
+ repo.CredentialsID = &creds.ID
+ }
+
+ if param.WebhookSecret != "" {
+ secret, err := util.Seal([]byte(param.WebhookSecret), []byte(s.cfg.Passphrase))
+ if err != nil {
+ return fmt.Errorf("saving repo: failed to encrypt string: %w", err)
+ }
+ repo.WebhookSecret = secret
+ }
+
+ if param.PoolBalancerType != "" {
+ repo.PoolBalancerType = param.PoolBalancerType
+ }
+
+ q := tx.Save(&repo)
+ if q.Error != nil {
+ return fmt.Errorf("error saving repo: %w", q.Error)
+ }
+
+ return nil
+ })
if err != nil {
- return params.Repository{}, errors.Wrap(err, "saving repo")
+ return params.Repository{}, fmt.Errorf("error saving repo: %w", err)
+ }
+
+ repo, err = s.getRepoByID(ctx, s.conn, repoID, "Endpoint", "Credentials", "Credentials.Endpoint", "GiteaCredentials", "GiteaCredentials.Endpoint")
+ if err != nil {
+		return params.Repository{}, fmt.Errorf("error updating repository: %w", err)
+ }
+
+ newParams, err = s.sqlToCommonRepository(repo, true)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error saving repo: %w", err)
}
return newParams, nil
}
func (s *sqlDatabase) GetRepositoryByID(ctx context.Context, repoID string) (params.Repository, error) {
- repo, err := s.getRepoByID(ctx, repoID, "Pools")
+ preloadList := []string{
+ "Pools",
+ "Credentials",
+ "Endpoint",
+ "Credentials.Endpoint",
+ "GiteaCredentials",
+ "GiteaCredentials.Endpoint",
+ "Events",
+ }
+ repo, err := s.getRepoByID(ctx, s.conn, repoID, preloadList...)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
- param, err := s.sqlToCommonRepository(repo)
+ param, err := s.sqlToCommonRepository(repo, true)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
return param, nil
}
-func (s *sqlDatabase) CreateRepositoryPool(ctx context.Context, repoId string, param params.CreatePoolParams) (params.Pool, error) {
- if len(param.Tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("no tags specified")
- }
-
- repo, err := s.getRepoByID(ctx, repoId)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- newPool := Pool{
- ProviderName: param.ProviderName,
- MaxRunners: param.MaxRunners,
- MinIdleRunners: param.MinIdleRunners,
- RunnerPrefix: param.GetRunnerPrefix(),
- Image: param.Image,
- Flavor: param.Flavor,
- OSType: param.OSType,
- OSArch: param.OSArch,
- RepoID: repo.ID,
- Enabled: param.Enabled,
- RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
- }
-
- if len(param.ExtraSpecs) > 0 {
- newPool.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
- }
-
- _, err = s.getRepoPoolByUniqueFields(ctx, repoId, newPool.ProviderName, newPool.Image, newPool.Flavor)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Pool{}, errors.Wrap(err, "creating pool")
- }
- } else {
- return params.Pool{}, runnerErrors.NewConflictError("pool with the same image and flavor already exists on this provider")
- }
-
- tags := []Tag{}
- for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
- }
- tags = append(tags, t)
- }
-
- q := s.conn.Create(&newPool)
- if q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "adding pool")
- }
-
- for _, tt := range tags {
- if err := s.conn.Model(&newPool).Association("Tags").Append(&tt); err != nil {
- return params.Pool{}, errors.Wrap(err, "saving tag")
- }
- }
-
- pool, err := s.getPoolByID(ctx, newPool.ID.String(), "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) ListRepoPools(ctx context.Context, repoID string) ([]params.Pool, error) {
- pools, err := s.getRepoPools(ctx, repoID, "Tags")
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
-
- ret := make([]params.Pool, len(pools))
- for idx, pool := range pools {
- ret[idx] = s.sqlToCommonPool(pool)
- }
-
- return ret, nil
-}
-
-func (s *sqlDatabase) GetRepositoryPool(ctx context.Context, repoID, poolID string) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID, "Tags", "Instances")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return s.sqlToCommonPool(pool), nil
-}
-
-func (s *sqlDatabase) DeleteRepositoryPool(ctx context.Context, repoID, poolID string) error {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID)
- if err != nil {
- return errors.Wrap(err, "looking up repo pool")
- }
- q := s.conn.Unscoped().Delete(&pool)
- if q.Error != nil && !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return errors.Wrap(q.Error, "deleting pool")
- }
- return nil
-}
-
-func (s *sqlDatabase) FindRepositoryPoolByTags(ctx context.Context, repoID string, tags []string) (params.Pool, error) {
- pool, err := s.findPoolByTags(repoID, "repo_id", tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (s *sqlDatabase) ListRepoInstances(ctx context.Context, repoID string) ([]params.Instance, error) {
- pools, err := s.getRepoPools(ctx, repoID, "Instances")
- if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
- }
-
- ret := []params.Instance{}
- for _, pool := range pools {
- for _, instance := range pool.Instances {
- ret = append(ret, s.sqlToParamsInstance(instance))
- }
- }
- return ret, nil
-}
-
-func (s *sqlDatabase) UpdateRepositoryPool(ctx context.Context, repoID, poolID string, param params.UpdatePoolParams) (params.Pool, error) {
- pool, err := s.getEntityPool(ctx, params.RepositoryPool, repoID, poolID, "Tags", "Instances", "Enterprise", "Organization", "Repository")
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
-
- return s.updatePool(pool, param)
-}
-
-func (s *sqlDatabase) getRepo(ctx context.Context, owner, name string) (Repository, error) {
+func (s *sqlDatabase) getRepo(_ context.Context, owner, name, endpointName string) (Repository, error) {
var repo Repository
- q := s.conn.Where("name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE", name, owner).
+ q := s.conn.Where("name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE", name, owner, endpointName).
+ Preload("Credentials").
+ Preload("Credentials.Endpoint").
+ Preload("GiteaCredentials").
+ Preload("GiteaCredentials.Endpoint").
+ Preload("Endpoint").
First(&repo)
q = q.First(&repo)
@@ -289,94 +259,19 @@ func (s *sqlDatabase) getRepo(ctx context.Context, owner, name string) (Reposito
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Repository{}, runnerErrors.ErrNotFound
}
- return Repository{}, errors.Wrap(q.Error, "fetching repository from database")
+ return Repository{}, fmt.Errorf("error fetching repository from database: %w", q.Error)
}
return repo, nil
}
-func (s *sqlDatabase) findPoolByTags(id, poolType string, tags []string) (params.Pool, error) {
- if len(tags) == 0 {
- return params.Pool{}, runnerErrors.NewBadRequestError("missing tags")
- }
- u, err := uuid.FromString(id)
+func (s *sqlDatabase) getRepoByID(_ context.Context, tx *gorm.DB, id string, preload ...string) (Repository, error) {
+ u, err := uuid.Parse(id)
if err != nil {
- return params.Pool{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
- }
-
- var pools []Pool
- where := fmt.Sprintf("tags.name in ? and %s = ? and enabled = true", poolType)
- q := s.conn.Joins("JOIN pool_tags on pool_tags.pool_id=pools.id").
- Joins("JOIN tags on tags.id=pool_tags.tag_id").
- Group("pools.id").
- Preload("Tags").
- Having("count(1) = ?", len(tags)).
- Where(where, tags, u).Find(&pools)
-
- if q.Error != nil {
- if errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
- return params.Pool{}, errors.Wrap(q.Error, "fetching pool")
- }
-
- if len(pools) == 0 {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
- return s.sqlToCommonPool(pools[0]), nil
-}
-
-func (s *sqlDatabase) getRepoPoolByUniqueFields(ctx context.Context, repoID string, provider, image, flavor string) (Pool, error) {
- repo, err := s.getRepoByID(ctx, repoID)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- q := s.conn
- var pool []Pool
- err = q.Model(&repo).Association("Pools").Find(&pool, "provider_name = ? and image = ? and flavor = ?", provider, image, flavor)
- if err != nil {
- return Pool{}, errors.Wrap(err, "fetching pool")
- }
- if len(pool) == 0 {
- return Pool{}, runnerErrors.ErrNotFound
- }
-
- return pool[0], nil
-}
-
-func (s *sqlDatabase) getRepoPools(ctx context.Context, repoID string, preload ...string) ([]Pool, error) {
- _, err := s.getRepoByID(ctx, repoID)
- if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
- }
-
- q := s.conn
- if len(preload) > 0 {
- for _, item := range preload {
- q = q.Preload(item)
- }
- }
-
- var pools []Pool
- err = q.Model(&Pool{}).Where("repo_id = ?", repoID).
- Omit("extra_specs").
- Find(&pools).Error
- if err != nil {
- return nil, errors.Wrap(err, "fetching pool")
- }
-
- return pools, nil
-}
-
-func (s *sqlDatabase) getRepoByID(ctx context.Context, id string, preload ...string) (Repository, error) {
- u, err := uuid.FromString(id)
- if err != nil {
- return Repository{}, errors.Wrap(runnerErrors.ErrBadRequest, "parsing id")
+ return Repository{}, runnerErrors.NewBadRequestError("error parsing id: %s", err)
}
var repo Repository
- q := s.conn
+ q := tx
if len(preload) > 0 {
for _, field := range preload {
q = q.Preload(field)
@@ -388,7 +283,7 @@ func (s *sqlDatabase) getRepoByID(ctx context.Context, id string, preload ...str
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return Repository{}, runnerErrors.ErrNotFound
}
- return Repository{}, errors.Wrap(q.Error, "fetching repository from database")
+ return Repository{}, fmt.Errorf("error fetching repository from database: %w", q.Error)
}
return repo, nil
}
diff --git a/database/sql/repositories_test.go b/database/sql/repositories_test.go
index 5ddbfda3..b3c15eca 100644
--- a/database/sql/repositories_test.go
+++ b/database/sql/repositories_test.go
@@ -22,15 +22,17 @@ import (
"sort"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
-
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type RepoTestFixtures struct {
@@ -38,16 +40,29 @@ type RepoTestFixtures struct {
CreateRepoParams params.CreateRepoParams
CreatePoolParams params.CreatePoolParams
CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
+ UpdateRepoParams params.UpdateEntityParams
UpdatePoolParams params.UpdatePoolParams
SQLMock sqlmock.Sqlmock
}
+func init() {
+ watcher.SetWatcher(&garmTesting.MockWatcher{})
+}
+
type RepoTestSuite struct {
suite.Suite
Store dbCommon.Store
StoreSQLMocked *sqlDatabase
Fixtures *RepoTestFixtures
+
+ adminCtx context.Context
+ adminUserID string
+
+ testCreds params.ForgeCredentials
+ testCredsGitea params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *RepoTestSuite) equalReposByName(expected, actual []params.Repository) {
@@ -81,21 +96,36 @@ func (s *RepoTestSuite) assertSQLMockExpectations() {
func (s *RepoTestSuite) SetupTest() {
// create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
+
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
s.Store = db
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.adminCtx = adminCtx
+ s.adminUserID = auth.UserID(adminCtx)
+ s.Require().NotEmpty(s.adminUserID)
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.testCredsGitea = garmTesting.CreateTestGiteaCredentials(adminCtx, "new-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some repository objects in the database, for testing purposes
repos := []params.Repository{}
for i := 1; i <= 3; i++ {
repo, err := db.CreateRepository(
- context.Background(),
+ adminCtx,
fmt.Sprintf("test-owner-%d", i),
fmt.Sprintf("test-repo-%d", i),
- fmt.Sprintf("test-creds-%d", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%d", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%d): %v", i, err))
@@ -115,7 +145,7 @@ func (s *RepoTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -135,7 +165,7 @@ func (s *RepoTestSuite) SetupTest() {
CreateRepoParams: params.CreateRepoParams{
Owner: "test-owner-repo",
Name: "test-repo",
- CredentialsName: "test-creds-repo",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -147,14 +177,14 @@ func (s *RepoTestSuite) SetupTest() {
Flavor: "test-flavor",
OSType: "windows",
OSArch: "amd64",
- Tags: []string{"self-hosted", "arm64", "windows"},
+ Tags: []string{"arm64-windows-runner"},
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-update-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.secondaryTestCreds.Name,
WebhookSecret: "test-update-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -168,26 +198,93 @@ func (s *RepoTestSuite) SetupTest() {
s.Fixtures = fixtures
}
+func (s *RepoTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *RepoTestSuite) TestCreateRepository() {
// call tested function
repo, err := s.Store.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
// assertions
s.Require().Nil(err)
- storeRepo, err := s.Store.GetRepositoryByID(context.Background(), repo.ID)
+ storeRepo, err := s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
if err != nil {
s.FailNow(fmt.Sprintf("failed to get repository by id: %v", err))
}
s.Require().Equal(storeRepo.Owner, repo.Owner)
s.Require().Equal(storeRepo.Name, repo.Name)
- s.Require().Equal(storeRepo.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(storeRepo.Credentials.Name, repo.Credentials.Name)
s.Require().Equal(storeRepo.WebhookSecret, repo.WebhookSecret)
+
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(s.Fixtures.CreateRepoParams.Owner, entity.Owner)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeRepository)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GithubEndpointType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryGitea() {
+ // call tested function
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ s.testCredsGitea,
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
+ )
+
+ // assertions
+ s.Require().Nil(err)
+ storeRepo, err := s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get repository by id: %v", err))
+ }
+ s.Require().Equal(storeRepo.Owner, repo.Owner)
+ s.Require().Equal(storeRepo.Name, repo.Name)
+ s.Require().Equal(storeRepo.Credentials.Name, repo.Credentials.Name)
+ s.Require().Equal(storeRepo.WebhookSecret, repo.WebhookSecret)
+
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ s.Require().Equal(repo.ID, entity.ID)
+ s.Require().Equal(entity.EntityType, params.ForgeEntityTypeRepository)
+
+ forgeType, err := entity.GetForgeType()
+ s.Require().Nil(err)
+ s.Require().Equal(forgeType, params.GiteaEndpointType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryInvalidForgeType() {
+ // call tested function
+ _, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ params.ForgeCredentials{
+ Name: "test-creds",
+ ForgeType: "invalid-forge-type",
+ Endpoint: params.ForgeEndpoint{
+ Name: "test-endpoint",
+ },
+ },
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
+ )
+
+ s.Require().NotNil(err)
+ s.Require().Equal("error creating repository: unsupported credentials type", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryInvalidDBPassphrase() {
@@ -197,18 +294,19 @@ func (s *RepoTestSuite) TestCreateRepositoryInvalidDBPassphrase() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
// make sure we use a 'sqlDatabase' struct with a wrong 'cfg.Passphrase'
- cfg.Passphrase = "wrong-passphrase" // it must have a size different than 32
+ cfg.Passphrase = wrongPassphrase // it must have a size different than 32
sqlDB := &sqlDatabase{
conn: conn,
cfg: cfg,
}
_, err = sqlDB.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
s.Require().NotNil(err)
@@ -223,20 +321,21 @@ func (s *RepoTestSuite) TestCreateRepositoryInvalidDBCreateErr() {
s.Fixtures.SQLMock.ExpectRollback()
_, err := s.StoreSQLMocked.CreateRepository(
- context.Background(),
+ s.adminCtx,
s.Fixtures.CreateRepoParams.Owner,
s.Fixtures.CreateRepoParams.Name,
- s.Fixtures.CreateRepoParams.CredentialsName,
+ s.testCreds,
s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin,
)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating repository: creating repo mock error", err.Error())
+ s.Require().Equal("error creating repository: error creating repository: creating repo mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestGetRepository() {
- repo, err := s.Store.GetRepository(context.Background(), s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name)
+ repo, err := s.Store.GetRepository(s.adminCtx, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Endpoint.Name)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Repos[0].Owner, repo.Owner)
@@ -245,7 +344,7 @@ func (s *RepoTestSuite) TestGetRepository() {
}
func (s *RepoTestSuite) TestGetRepositoryCaseInsensitive() {
- repo, err := s.Store.GetRepository(context.Background(), "TeSt-oWnEr-1", "TeSt-rEpO-1")
+ repo, err := s.Store.GetRepository(s.adminCtx, "TeSt-oWnEr-1", "TeSt-rEpO-1", "github.com")
s.Require().Nil(err)
s.Require().Equal("test-owner-1", repo.Owner)
@@ -253,82 +352,163 @@ func (s *RepoTestSuite) TestGetRepositoryCaseInsensitive() {
}
func (s *RepoTestSuite) TestGetRepositoryNotFound() {
- _, err := s.Store.GetRepository(context.Background(), "dummy-owner", "dummy-name")
+ _, err := s.Store.GetRepository(s.adminCtx, "dummy-owner", "dummy-name", "github.com")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE and endpoint_name = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Endpoint.Name, 1).
WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
- _, err := s.StoreSQLMocked.GetRepository(context.Background(), s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name)
+ _, err := s.StoreSQLMocked.GetRepository(s.adminCtx, s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Endpoint.Name)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: missing secret", err.Error())
+ s.Require().Equal("error fetching repo: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepositories() {
- repos, err := s.Store.ListRepositories((context.Background()))
+ repos, err := s.Store.ListRepositories(s.adminCtx, params.RepositoryFilter{})
s.Require().Nil(err)
s.equalReposByName(s.Fixtures.Repos, repos)
}
+func (s *RepoTestSuite) TestListRepositoriesWithFilters() {
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo2, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo",
+ s.testCredsGitea,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo3, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner",
+ "test-repo2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repo4, err := s.Store.CreateRepository(
+ s.adminCtx,
+ "test-owner2",
+ "test-repo",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+
+ repos, err := s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo, repo2, repo4}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ Owner: "test-owner",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo, repo2}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo",
+ Owner: "test-owner",
+ Endpoint: s.giteaEndpoint.Name,
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo2}, repos)
+
+ repos, err = s.Store.ListRepositories(
+ s.adminCtx,
+ params.RepositoryFilter{
+ Name: "test-repo2",
+ })
+
+ s.Require().Nil(err)
+ s.equalReposByName([]params.Repository{repo3}, repos)
+}
+
func (s *RepoTestSuite) TestListRepositoriesDBFetchErr() {
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE `repositories`.`deleted_at` IS NULL")).
WillReturnError(fmt.Errorf("fetching user from database mock error"))
- _, err := s.StoreSQLMocked.ListRepositories(context.Background())
+ _, err := s.StoreSQLMocked.ListRepositories(s.adminCtx, params.RepositoryFilter{})
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching user from database: fetching user from database mock error", err.Error())
+ s.Require().Equal("error fetching user from database: fetching user from database mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepositoriesDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE `repositories`.`deleted_at` IS NULL")).
WillReturnRows(sqlmock.NewRows([]string{"id", "webhook_secret"}).AddRow(s.Fixtures.Repos[0].ID, s.Fixtures.Repos[0].WebhookSecret))
- _, err := s.StoreSQLMocked.ListRepositories(context.Background())
+ _, err := s.StoreSQLMocked.ListRepositories(s.adminCtx, params.RepositoryFilter{})
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repositories: decrypting secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error fetching repositories: error decrypting secret: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestDeleteRepository() {
- err := s.Store.DeleteRepository(context.Background(), s.Fixtures.Repos[0].ID)
+ err := s.Store.DeleteRepository(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().Nil(err)
- _, err = s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ _, err = s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryInvalidRepoID() {
- err := s.Store.DeleteRepository(context.Background(), "dummy-repo-id")
+ err := s.Store.DeleteRepository(s.adminCtx, "dummy-repo-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryDBRemoveErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
@@ -337,112 +517,151 @@ func (s *RepoTestSuite) TestDeleteRepositoryDBRemoveErr() {
WillReturnError(fmt.Errorf("mocked deleting repo error"))
s.Fixtures.SQLMock.ExpectRollback()
- err := s.StoreSQLMocked.DeleteRepository(context.Background(), s.Fixtures.Repos[0].ID)
+ err := s.StoreSQLMocked.DeleteRepository(s.adminCtx, s.Fixtures.Repos[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("deleting repo: mocked deleting repo error", err.Error())
+ s.Require().Equal("error deleting repo: mocked deleting repo error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepository() {
- repo, err := s.Store.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ repo, err := s.Store.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, repo.WebhookSecret)
}
func (s *RepoTestSuite) TestUpdateRepositoryInvalidRepoID() {
- _, err := s.Store.UpdateRepository(context.Background(), "dummy-repo-id", s.Fixtures.UpdateRepoParams)
+ _, err := s.Store.UpdateRepository(s.adminCtx, "dummy-repo-id", s.Fixtures.UpdateRepoParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error saving repo: error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryDBEncryptErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
+
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving repo: saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepositoryDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `repositories` SET")).
WillReturnError(fmt.Errorf("saving repo mock error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: saving repo mock error", err.Error())
+ s.Require().Equal("error saving repo: error saving repo: saving repo mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestUpdateRepositoryDBDecryptingErr() {
- s.StoreSQLMocked.cfg.Passphrase = "wrong-passphrase"
- s.Fixtures.UpdateRepoParams.WebhookSecret = "webhook-secret"
-
+ s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
+ s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.Fixtures.Repos[0].ID, s.githubEndpoint.Name))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_credentials` WHERE user_id = ? AND name = ? AND `github_credentials`.`deleted_at` IS NULL ORDER BY `github_credentials`.`id` LIMIT ?")).
+ WithArgs(s.adminUserID, s.secondaryTestCreds.Name, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id", "endpoint_name"}).
+ AddRow(s.secondaryTestCreds.ID, s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectQuery(regexp.QuoteMeta("SELECT * FROM `github_endpoints` WHERE `github_endpoints`.`name` = ? AND `github_endpoints`.`deleted_at` IS NULL")).
+ WithArgs(s.testCreds.Endpoint.Name).
+ WillReturnRows(sqlmock.NewRows([]string{"name"}).
+ AddRow(s.secondaryTestCreds.Endpoint.Name))
+ s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
+ _, err := s.StoreSQLMocked.UpdateRepository(s.adminCtx, s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.Require().Equal("error saving repo: saving repo: failed to encrypt string: invalid passphrase length (expected length 32 characters)", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestGetRepositoryByID() {
- repo, err := s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ repo, err := s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.Repos[0].ID, repo.ID)
}
func (s *RepoTestSuite) TestGetRepositoryByIDInvalidRepoID() {
- _, err := s.Store.GetRepositoryByID(context.Background(), "dummy-repo-id")
+ _, err := s.Store.GetRepositoryByID(s.adminCtx, "dummy-repo-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching repo: error parsing id: invalid UUID length: 13", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryByIDDBDecryptingErr() {
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repository_events` WHERE `repository_events`.`repo_id` = ? AND `repository_events`.`deleted_at` IS NULL")).
+ WithArgs(s.Fixtures.Repos[0].ID).
+ WillReturnRows(sqlmock.NewRows([]string{"repo_id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND `pools`.`deleted_at` IS NULL")).
WithArgs(s.Fixtures.Repos[0].ID).
WillReturnRows(sqlmock.NewRows([]string{"repo_id"}).AddRow(s.Fixtures.Repos[0].ID))
- _, err := s.StoreSQLMocked.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ _, err := s.StoreSQLMocked.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: missing secret", err.Error())
+ s.Require().Equal("error fetching repo: missing secret", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().Nil(err)
- repo, err := s.Store.GetRepositoryByID(context.Background(), s.Fixtures.Repos[0].ID)
+ repo, err := s.Store.GetRepositoryByID(s.adminCtx, s.Fixtures.Repos[0].ID)
if err != nil {
s.FailNow(fmt.Sprintf("cannot get repo by ID: %v", err))
}
@@ -455,216 +674,122 @@ func (s *RepoTestSuite) TestCreateRepositoryPool() {
func (s *RepoTestSuite) TestCreateRepositoryPoolMissingTags() {
s.Fixtures.CreatePoolParams.Tags = []string{}
-
- _, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ _, err = s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
s.Require().Equal("no tags specified", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryPoolInvalidRepoID() {
- _, err := s.Store.CreateRepositoryPool(context.Background(), "dummy-repo-id", s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: parsing id: invalid request", err.Error())
-}
-
-func (s *RepoTestSuite) TestCreateRepositoryPoolDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WillReturnError(fmt.Errorf("mocked creating pool error"))
-
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("creating pool: fetching pool: mocked creating pool error", err.Error())
-}
-
-func (s *RepoTestSuite) TestCreateRepositoryPoolDBPoolAlreadyExistErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id", "provider_name", "image", "flavor"}).
- AddRow(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor))
-
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
-
- s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("pool with the same image and flavor already exists on this provider", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchTagErr() {
+ s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
WillReturnError(fmt.Errorf("mocked fetching tag error"))
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching tag: fetching tag from database: mocked fetching tag error", err.Error())
+ s.Require().Equal("error creating tag: error fetching tag from database: mocked fetching tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBAddingPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnError(fmt.Errorf("mocked adding pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("adding pool: mocked adding pool error", err.Error())
+ s.Require().Equal("error creating pool: mocked adding pool error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBSaveTagErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnError(fmt.Errorf("mocked saving tag error"))
s.Fixtures.SQLMock.ExpectRollback()
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
- s.assertSQLMockExpectations()
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("saving tag: mocked saving tag error", err.Error())
+ s.Require().Equal("error associating tags: mocked saving tag error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND (provider_name = ? and image = ? and flavor = ?) AND `pools`.`deleted_at` IS NULL")).
- WithArgs(
- s.Fixtures.Repos[0].ID,
- s.Fixtures.CreatePoolParams.ProviderName,
- s.Fixtures.CreatePoolParams.Image,
- s.Fixtures.CreatePoolParams.Flavor).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
- WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Repos[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? COLLATE NOCASE AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
+ WillReturnRows(sqlmock.NewRows([]string{"linux"}))
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("INSERT INTO `pools`")).
WillReturnResult(sqlmock.NewResult(1, 1))
- s.Fixtures.SQLMock.ExpectCommit()
- s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
ExpectExec(regexp.QuoteMeta("UPDATE `pools` SET")).
WillReturnResult(sqlmock.NewResult(1, 1))
@@ -676,160 +801,165 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
WillReturnResult(sqlmock.NewResult(1, 1))
s.Fixtures.SQLMock.ExpectCommit()
s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
WillReturnRows(sqlmock.NewRows([]string{"id"}))
- _, err := s.StoreSQLMocked.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ _, err = s.StoreSQLMocked.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *RepoTestSuite) TestListRepoPools() {
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
repoPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Flavor = fmt.Sprintf("test-flavor-%d", i)
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
repoPools = append(repoPools, pool)
}
- pools, err := s.Store.ListRepoPools(context.Background(), s.Fixtures.Repos[0].ID)
+ pools, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().Nil(err)
garmTesting.EqualDBEntityID(s.T(), repoPools, pools)
}
func (s *RepoTestSuite) TestListRepoPoolsInvalidRepoID() {
- _, err := s.Store.ListRepoPools(context.Background(), "dummy-repo-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.ListEntityPools(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching pools: fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pools: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestGetRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- repoPool, err := s.Store.GetRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
+ repoPool, err := s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
s.Require().Equal(repoPool.ID, pool.ID)
}
func (s *RepoTestSuite) TestGetRepositoryPoolInvalidRepoID() {
- _, err := s.Store.GetRepositoryPool(context.Background(), "dummy-repo-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.GetEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("fetching pool: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPool() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- err = s.Store.DeleteRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
+ err = s.Store.DeleteEntityPool(s.adminCtx, entity, pool.ID)
s.Require().Nil(err)
- _, err = s.Store.GetOrganizationPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Store.GetEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolInvalidRepoID() {
- err := s.Store.DeleteRepositoryPool(context.Background(), "dummy-repo-id", "dummy-pool-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ err := s.Store.DeleteEntityPool(s.adminCtx, entity, "dummy-pool-id")
s.Require().NotNil(err)
- s.Require().Equal("looking up repo pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolDBDeleteErr() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (id = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
- WithArgs(pool.ID, s.Fixtures.Repos[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"repo_id", "id"}).AddRow(s.Fixtures.Repos[0].ID, pool.ID))
s.Fixtures.SQLMock.ExpectBegin()
s.Fixtures.SQLMock.
- ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE `pools`.`id` = ?")).
- WithArgs(pool.ID).
+ ExpectExec(regexp.QuoteMeta("DELETE FROM `pools` WHERE id = ? and repo_id = ?")).
+ WithArgs(pool.ID, s.Fixtures.Repos[0].ID).
WillReturnError(fmt.Errorf("mocked deleting pool error"))
s.Fixtures.SQLMock.ExpectRollback()
- err = s.StoreSQLMocked.DeleteRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, pool.ID)
-
+ err = s.StoreSQLMocked.DeleteEntityPool(s.adminCtx, entity, pool.ID)
+ s.Require().NotNil(err)
+ s.Require().Equal("error removing pool: mocked deleting pool error", err.Error())
s.assertSQLMockExpectations()
- s.Require().NotNil(err)
- s.Require().Equal("deleting pool: mocked deleting pool error", err.Error())
-}
-
-func (s *RepoTestSuite) TestFindRepositoryPoolByTags() {
- repoPool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
- if err != nil {
- s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
- }
-
- pool, err := s.Store.FindRepositoryPoolByTags(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams.Tags)
- s.Require().Nil(err)
- s.Require().Equal(repoPool.ID, pool.ID)
- s.Require().Equal(repoPool.Image, pool.Image)
- s.Require().Equal(repoPool.Flavor, pool.Flavor)
-}
-
-func (s *RepoTestSuite) TestFindRepositoryPoolByTagsMissingTags() {
- tags := []string{}
-
- _, err := s.Store.FindRepositoryPoolByTags(context.Background(), s.Fixtures.Repos[0].ID, tags)
-
- s.Require().NotNil(err)
- s.Require().Equal("fetching pool: missing tags", err.Error())
}
func (s *RepoTestSuite) TestListRepoInstances() {
- pool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ pool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
poolInstances := []params.Instance{}
for i := 1; i <= 3; i++ {
s.Fixtures.CreateInstanceParams.Name = fmt.Sprintf("test-repo-%d", i)
- instance, err := s.Store.CreateInstance(context.Background(), pool.ID, s.Fixtures.CreateInstanceParams)
+ instance, err := s.Store.CreateInstance(s.adminCtx, pool.ID, s.Fixtures.CreateInstanceParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create instance: %s", err))
}
poolInstances = append(poolInstances, instance)
}
- instances, err := s.Store.ListRepoInstances(context.Background(), s.Fixtures.Repos[0].ID)
+ instances, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().Nil(err)
s.equalInstancesByID(poolInstances, instances)
}
func (s *RepoTestSuite) TestListRepoInstancesInvalidRepoID() {
- _, err := s.Store.ListRepoInstances(context.Background(), "dummy-repo-id")
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.ListEntityInstances(s.adminCtx, entity)
s.Require().NotNil(err)
- s.Require().Equal("fetching repo: fetching repo: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching entity: error parsing id: invalid request", err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryPool() {
- repoPool, err := s.Store.CreateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.CreatePoolParams)
+ entity, err := s.Fixtures.Repos[0].GetEntity()
+ s.Require().Nil(err)
+ repoPool, err := s.Store.CreateEntityPool(s.adminCtx, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
- pool, err := s.Store.UpdateRepositoryPool(context.Background(), s.Fixtures.Repos[0].ID, repoPool.ID, s.Fixtures.UpdatePoolParams)
+ pool, err := s.Store.UpdateEntityPool(s.adminCtx, entity, repoPool.ID, s.Fixtures.UpdatePoolParams)
s.Require().Nil(err)
s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
@@ -839,10 +969,37 @@ func (s *RepoTestSuite) TestUpdateRepositoryPool() {
}
func (s *RepoTestSuite) TestUpdateRepositoryPoolInvalidRepoID() {
- _, err := s.Store.UpdateRepositoryPool(context.Background(), "dummy-org-id", "dummy-repo-id", s.Fixtures.UpdatePoolParams)
+ entity := params.ForgeEntity{
+ ID: "dummy-repo-id",
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ _, err := s.Store.UpdateEntityPool(s.adminCtx, entity, "dummy-repo-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error parsing id: invalid request", err.Error())
+}
+
+func (s *RepoTestSuite) TestAddRepoEntityEvent() {
+ repo, err := s.Store.CreateRepository(
+ s.adminCtx,
+ s.Fixtures.CreateRepoParams.Owner,
+ s.Fixtures.CreateRepoParams.Name,
+ s.testCreds,
+ s.Fixtures.CreateRepoParams.WebhookSecret,
+ params.PoolBalancerTypeRoundRobin)
+
+ s.Require().Nil(err)
+ entity, err := repo.GetEntity()
+ s.Require().Nil(err)
+ err = s.Store.AddEntityEvent(s.adminCtx, entity, params.StatusEvent, params.EventInfo, "this is a test", 20)
+ s.Require().Nil(err)
+
+ repo, err = s.Store.GetRepositoryByID(s.adminCtx, repo.ID)
+ s.Require().Nil(err)
+ s.Require().Equal(1, len(repo.Events))
+ s.Require().Equal(params.StatusEvent, repo.Events[0].EventType)
+ s.Require().Equal(params.EventInfo, repo.Events[0].EventLevel)
+ s.Require().Equal("this is a test", repo.Events[0].Message)
}
func TestRepoTestSuite(t *testing.T) {
diff --git a/database/sql/scaleset_instances.go b/database/sql/scaleset_instances.go
new file mode 100644
index 00000000..457c99b5
--- /dev/null
+++ b/database/sql/scaleset_instances.go
@@ -0,0 +1,86 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+func (s *sqlDatabase) CreateScaleSetInstance(_ context.Context, scaleSetID uint, param params.CreateInstanceParams) (instance params.Instance, err error) {
+ scaleSet, err := s.getScaleSetByID(s.conn, scaleSetID)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.InstanceEntityType, common.CreateOperation, instance)
+ }
+ }()
+
+ var secret []byte
+ if len(param.JitConfiguration) > 0 {
+ secret, err = s.marshalAndSeal(param.JitConfiguration)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error marshalling jit config: %w", err)
+ }
+ }
+
+ newInstance := Instance{
+ ScaleSet: scaleSet,
+ Name: param.Name,
+ Status: param.Status,
+ RunnerStatus: param.RunnerStatus,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ CallbackURL: param.CallbackURL,
+ MetadataURL: param.MetadataURL,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ JitConfiguration: secret,
+ AgentID: param.AgentID,
+ }
+ q := s.conn.Create(&newInstance)
+ if q.Error != nil {
+ return params.Instance{}, fmt.Errorf("error creating instance: %w", q.Error)
+ }
+
+ return s.sqlToParamsInstance(newInstance)
+}
+
+func (s *sqlDatabase) ListScaleSetInstances(_ context.Context, scalesetID uint) ([]params.Instance, error) {
+ var instances []Instance
+ query := s.conn.
+ Preload("ScaleSet").
+ Preload("Job").
+ Where("scale_set_fk_id = ?", scalesetID)
+
+ if err := query.Find(&instances); err.Error != nil {
+ return nil, fmt.Errorf("error fetching instances: %w", err.Error)
+ }
+
+ var err error
+ ret := make([]params.Instance, len(instances))
+ for idx, inst := range instances {
+ ret[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return nil, fmt.Errorf("error converting instance: %w", err)
+ }
+ }
+ return ret, nil
+}
diff --git a/database/sql/scalesets.go b/database/sql/scalesets.go
new file mode 100644
index 00000000..5877ad5c
--- /dev/null
+++ b/database/sql/scalesets.go
@@ -0,0 +1,458 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/google/uuid"
+ "gorm.io/datatypes"
+ "gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+func (s *sqlDatabase) ListAllScaleSets(_ context.Context) ([]params.ScaleSet, error) {
+ var scaleSets []ScaleSet
+
+ q := s.conn.Model(&ScaleSet{}).
+ Preload("Organization").
+ Preload("Organization.Endpoint").
+ Preload("Repository").
+ Preload("Repository.Endpoint").
+ Preload("Enterprise").
+ Preload("Enterprise.Endpoint").
+ Omit("extra_specs").
+ Omit("status_messages").
+ Find(&scaleSets)
+ if q.Error != nil {
+ return nil, fmt.Errorf("error fetching all scale sets: %w", q.Error)
+ }
+
+ ret := make([]params.ScaleSet, len(scaleSets))
+ var err error
+ for idx, val := range scaleSets {
+ ret[idx], err = s.sqlToCommonScaleSet(val)
+ if err != nil {
+ return nil, fmt.Errorf("error converting scale sets: %w", err)
+ }
+ }
+ return ret, nil
+}
+
+func (s *sqlDatabase) CreateEntityScaleSet(_ context.Context, entity params.ForgeEntity, param params.CreateScaleSetParams) (scaleSet params.ScaleSet, err error) {
+ if err := param.Validate(); err != nil {
+ return params.ScaleSet{}, fmt.Errorf("failed to validate create params: %w", err)
+ }
+
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ScaleSetEntityType, common.CreateOperation, scaleSet)
+ }
+ }()
+
+ newScaleSet := ScaleSet{
+ Name: param.Name,
+ ScaleSetID: param.ScaleSetID,
+ DisableUpdate: param.DisableUpdate,
+ ProviderName: param.ProviderName,
+ RunnerPrefix: param.GetRunnerPrefix(),
+ MaxRunners: param.MaxRunners,
+ MinIdleRunners: param.MinIdleRunners,
+ RunnerBootstrapTimeout: param.RunnerBootstrapTimeout,
+ Image: param.Image,
+ Flavor: param.Flavor,
+ OSType: param.OSType,
+ OSArch: param.OSArch,
+ Enabled: param.Enabled,
+ GitHubRunnerGroup: param.GitHubRunnerGroup,
+ State: params.ScaleSetPendingCreate,
+ }
+
+ if len(param.ExtraSpecs) > 0 {
+ newScaleSet.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ entityID, err := uuid.Parse(entity.ID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ newScaleSet.RepoID = &entityID
+ case params.ForgeEntityTypeOrganization:
+ newScaleSet.OrgID = &entityID
+ case params.ForgeEntityTypeEnterprise:
+ newScaleSet.EnterpriseID = &entityID
+ }
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ if err := s.hasGithubEntity(tx, entity.EntityType, entity.ID); err != nil {
+ return fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ q := tx.Create(&newScaleSet)
+ if q.Error != nil {
+ return fmt.Errorf("error creating scale set: %w", q.Error)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+
+ dbScaleSet, err := s.getScaleSetByID(s.conn, newScaleSet.ID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ return s.sqlToCommonScaleSet(dbScaleSet)
+}
+
+func (s *sqlDatabase) listEntityScaleSets(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, preload ...string) ([]ScaleSet, error) {
+ if _, err := uuid.Parse(entityID); err != nil {
+ return nil, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if err := s.hasGithubEntity(tx, entityType, entityID); err != nil {
+ return nil, fmt.Errorf("error checking entity existence: %w", err)
+ }
+
+ var preloadEntity string
+ var fieldName string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ preloadEntity = repositoryFieldName
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ preloadEntity = organizationFieldName
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ preloadEntity = enterpriseFieldName
+ default:
+ return nil, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(preloadEntity)
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ var scaleSets []ScaleSet
+ condition := fmt.Sprintf("%s = ?", fieldName)
+ err := q.Model(&ScaleSet{}).
+ Where(condition, entityID).
+ Omit("extra_specs").
+ Omit("status_messages").
+ Find(&scaleSets).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return []ScaleSet{}, nil
+ }
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+
+ return scaleSets, nil
+}
+
+func (s *sqlDatabase) ListEntityScaleSets(_ context.Context, entity params.ForgeEntity) ([]params.ScaleSet, error) {
+ scaleSets, err := s.listEntityScaleSets(s.conn, entity.EntityType, entity.ID)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+
+ ret := make([]params.ScaleSet, len(scaleSets))
+ for idx, set := range scaleSets {
+ ret[idx], err = s.sqlToCommonScaleSet(set)
+ if err != nil {
+ return nil, fmt.Errorf("error converting scale set: %w", err)
+ }
+ }
+
+ return ret, nil
+}
+
+func (s *sqlDatabase) UpdateEntityScaleSet(ctx context.Context, entity params.ForgeEntity, scaleSetID uint, param params.UpdateScaleSetParams, callback func(old, newSet params.ScaleSet) error) (updatedScaleSet params.ScaleSet, err error) {
+ defer func() {
+ if err == nil {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, updatedScaleSet)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ scaleSet, err := s.getEntityScaleSet(tx, entity.EntityType, entity.ID, scaleSetID, "Instances")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ old, err := s.sqlToCommonScaleSet(scaleSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+
+ updatedScaleSet, err = s.updateScaleSet(tx, scaleSet, param)
+ if err != nil {
+ return fmt.Errorf("error updating scale set: %w", err)
+ }
+
+ if callback != nil {
+ if err := callback(old, updatedScaleSet); err != nil {
+ return fmt.Errorf("error executing update callback: %w", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+
+ updatedScaleSet, err = s.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ return params.ScaleSet{}, err
+ }
+ return updatedScaleSet, nil
+}
+
+func (s *sqlDatabase) getEntityScaleSet(tx *gorm.DB, entityType params.ForgeEntityType, entityID string, scaleSetID uint, preload ...string) (ScaleSet, error) {
+ if entityID == "" {
+ return ScaleSet{}, fmt.Errorf("error missing entity id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ if scaleSetID == 0 {
+ return ScaleSet{}, fmt.Errorf("error missing scaleset id: %w", runnerErrors.ErrBadRequest)
+ }
+
+ var fieldName string
+ var entityField string
+ switch entityType {
+ case params.ForgeEntityTypeRepository:
+ fieldName = entityTypeRepoName
+ entityField = "Repository"
+ case params.ForgeEntityTypeOrganization:
+ fieldName = entityTypeOrgName
+ entityField = "Organization"
+ case params.ForgeEntityTypeEnterprise:
+ fieldName = entityTypeEnterpriseName
+ entityField = "Enterprise"
+ default:
+ return ScaleSet{}, fmt.Errorf("invalid entityType: %v", entityType)
+ }
+
+ q := tx
+ q = q.Preload(entityField)
+ if len(preload) > 0 {
+ for _, item := range preload {
+ q = q.Preload(item)
+ }
+ }
+
+ var scaleSet ScaleSet
+ condition := fmt.Sprintf("id = ? and %s = ?", fieldName)
+ err := q.Model(&ScaleSet{}).
+ Where(condition, scaleSetID, entityID).
+ First(&scaleSet).Error
+ if err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return ScaleSet{}, fmt.Errorf("error finding scale set: %w", runnerErrors.ErrNotFound)
+ }
+ return ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ return scaleSet, nil
+}
+
+func (s *sqlDatabase) updateScaleSet(tx *gorm.DB, scaleSet ScaleSet, param params.UpdateScaleSetParams) (params.ScaleSet, error) {
+ if param.Enabled != nil && scaleSet.Enabled != *param.Enabled {
+ scaleSet.Enabled = *param.Enabled
+ }
+
+ if param.State != nil && *param.State != scaleSet.State {
+ scaleSet.State = *param.State
+ }
+
+ if param.ExtendedState != nil && *param.ExtendedState != scaleSet.ExtendedState {
+ scaleSet.ExtendedState = *param.ExtendedState
+ }
+
+ if param.ScaleSetID != 0 {
+ scaleSet.ScaleSetID = param.ScaleSetID
+ }
+
+ if param.Name != "" {
+ scaleSet.Name = param.Name
+ }
+
+ if param.GitHubRunnerGroup != nil && *param.GitHubRunnerGroup != "" {
+ scaleSet.GitHubRunnerGroup = *param.GitHubRunnerGroup
+ }
+
+ if param.Flavor != "" {
+ scaleSet.Flavor = param.Flavor
+ }
+
+ if param.Image != "" {
+ scaleSet.Image = param.Image
+ }
+
+ if param.Prefix != "" {
+ scaleSet.RunnerPrefix = param.Prefix
+ }
+
+ if param.MaxRunners != nil {
+ scaleSet.MaxRunners = *param.MaxRunners
+ }
+
+ if param.MinIdleRunners != nil {
+ scaleSet.MinIdleRunners = *param.MinIdleRunners
+ }
+
+ if param.OSArch != "" {
+ scaleSet.OSArch = param.OSArch
+ }
+
+ if param.OSType != "" {
+ scaleSet.OSType = param.OSType
+ }
+
+ if param.ExtraSpecs != nil {
+ scaleSet.ExtraSpecs = datatypes.JSON(param.ExtraSpecs)
+ }
+
+ if param.RunnerBootstrapTimeout != nil && *param.RunnerBootstrapTimeout > 0 {
+ scaleSet.RunnerBootstrapTimeout = *param.RunnerBootstrapTimeout
+ }
+
+ if param.GitHubRunnerGroup != nil {
+ scaleSet.GitHubRunnerGroup = *param.GitHubRunnerGroup
+ }
+
+ if q := tx.Save(&scaleSet); q.Error != nil {
+ return params.ScaleSet{}, fmt.Errorf("error saving database entry: %w", q.Error)
+ }
+
+ return s.sqlToCommonScaleSet(scaleSet)
+}
+
+func (s *sqlDatabase) GetScaleSetByID(_ context.Context, scaleSet uint) (params.ScaleSet, error) {
+ set, err := s.getScaleSetByID(
+ s.conn,
+ scaleSet,
+ "Instances",
+ "Enterprise",
+ "Enterprise.Endpoint",
+ "Organization",
+ "Organization.Endpoint",
+ "Repository",
+ "Repository.Endpoint",
+ )
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set by ID: %w", err)
+ }
+ return s.sqlToCommonScaleSet(set)
+}
+
+func (s *sqlDatabase) DeleteScaleSetByID(_ context.Context, scaleSetID uint) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.DeleteOperation, scaleSet)
+ }
+ }()
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ if len(dbSet.Instances) > 0 {
+ return runnerErrors.NewBadRequestError("cannot delete scaleset with runners")
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+
+ if q := tx.Unscoped().Delete(&dbSet); q.Error != nil {
+ return fmt.Errorf("error deleting scale set: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error removing scale set: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) SetScaleSetLastMessageID(_ context.Context, scaleSetID uint, lastMessageID int64) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, scaleSet)
+ }
+ }()
+ if err := s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ dbSet.LastMessageID = lastMessageID
+ if err := tx.Save(&dbSet).Error; err != nil {
+ return fmt.Errorf("error saving database entry: %w", err)
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error setting last message ID: %w", err)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) SetScaleSetDesiredRunnerCount(_ context.Context, scaleSetID uint, desiredRunnerCount int) (err error) {
+ var scaleSet params.ScaleSet
+ defer func() {
+ if err == nil && scaleSet.ID != 0 {
+ s.sendNotify(common.ScaleSetEntityType, common.UpdateOperation, scaleSet)
+ }
+ }()
+ if err := s.conn.Transaction(func(tx *gorm.DB) error {
+ dbSet, err := s.getScaleSetByID(tx, scaleSetID, "Instances", "Enterprise", "Organization", "Repository")
+ if err != nil {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ dbSet.DesiredRunnerCount = desiredRunnerCount
+ if err := tx.Save(&dbSet).Error; err != nil {
+ return fmt.Errorf("error saving database entry: %w", err)
+ }
+ scaleSet, err = s.sqlToCommonScaleSet(dbSet)
+ if err != nil {
+ return fmt.Errorf("error converting scale set: %w", err)
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error setting desired runner count: %w", err)
+ }
+ return nil
+}
diff --git a/database/sql/scalesets_test.go b/database/sql/scalesets_test.go
new file mode 100644
index 00000000..f1f9fbba
--- /dev/null
+++ b/database/sql/scalesets_test.go
@@ -0,0 +1,368 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package sql
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+type ScaleSetsTestSuite struct {
+ suite.Suite
+ Store dbCommon.Store
+ adminCtx context.Context
+ creds params.ForgeCredentials
+
+ org params.Organization
+ repo params.Repository
+ enterprise params.Enterprise
+
+ orgEntity params.ForgeEntity
+ repoEntity params.ForgeEntity
+ enterpriseEntity params.ForgeEntity
+}
+
+func (s *ScaleSetsTestSuite) SetupTest() {
+ // create testing sqlite database
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
+
+ db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
+ }
+ s.Store = db
+
+ adminCtx := garmTesting.ImpersonateAdminContext(ctx, db, s.T())
+ s.adminCtx = adminCtx
+
+ githubEndpoint := garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.creds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), githubEndpoint)
+
+ // create an organization for testing purposes
+ s.org, err = s.Store.CreateOrganization(s.adminCtx, "test-org", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create org: %s", err))
+ }
+
+ s.repo, err = s.Store.CreateRepository(s.adminCtx, "test-org", "test-repo", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create repo: %s", err))
+ }
+
+ s.enterprise, err = s.Store.CreateEnterprise(s.adminCtx, "test-enterprise", s.creds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create enterprise: %s", err))
+ }
+
+ s.orgEntity, err = s.org.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get org entity: %s", err))
+ }
+
+ s.repoEntity, err = s.repo.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get repo entity: %s", err))
+ }
+
+ s.enterpriseEntity, err = s.enterprise.GetEntity()
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to get enterprise entity: %s", err))
+ }
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteOrganization(s.adminCtx, s.org.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete org: %s", err))
+ }
+ err = s.Store.DeleteRepository(s.adminCtx, s.repo.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete repo: %s", err))
+ }
+ err = s.Store.DeleteEnterprise(s.adminCtx, s.enterprise.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete enterprise: %s", err))
+ }
+ })
+}
+
+func (s *ScaleSetsTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
+func (s *ScaleSetsTestSuite) callback(old, newSet params.ScaleSet) error {
+ s.Require().Equal(old.Name, "test-scaleset")
+ s.Require().Equal(newSet.Name, "test-scaleset-updated")
+ s.Require().Equal(old.OSType, commonParams.Linux)
+ s.Require().Equal(newSet.OSType, commonParams.Windows)
+ s.Require().Equal(old.OSArch, commonParams.Amd64)
+ s.Require().Equal(newSet.OSArch, commonParams.Arm64)
+ s.Require().Equal(old.ExtraSpecs, json.RawMessage(`{"test": 1}`))
+ s.Require().Equal(newSet.ExtraSpecs, json.RawMessage(`{"test": 111}`))
+ s.Require().Equal(old.MaxRunners, uint(10))
+ s.Require().Equal(newSet.MaxRunners, uint(60))
+ s.Require().Equal(old.MinIdleRunners, uint(5))
+ s.Require().Equal(newSet.MinIdleRunners, uint(50))
+ s.Require().Equal(old.Image, "test-image")
+ s.Require().Equal(newSet.Image, "new-test-image")
+ s.Require().Equal(old.Flavor, "test-flavor")
+ s.Require().Equal(newSet.Flavor, "new-test-flavor")
+ s.Require().Equal(old.GitHubRunnerGroup, "test-group")
+ s.Require().Equal(newSet.GitHubRunnerGroup, "new-test-group")
+ s.Require().Equal(old.RunnerPrefix.Prefix, "garm")
+ s.Require().Equal(newSet.RunnerPrefix.Prefix, "test-prefix2")
+ s.Require().Equal(old.Enabled, false)
+ s.Require().Equal(newSet.Enabled, true)
+ return nil
+}
+
+func (s *ScaleSetsTestSuite) TestScaleSetOperations() {
+ // create a scale set for the organization
+ createScaleSetPrams := params.CreateScaleSetParams{
+ Name: "test-scaleset",
+ ProviderName: "test-provider",
+ MaxRunners: 10,
+ MinIdleRunners: 5,
+ Image: "test-image",
+ Flavor: "test-flavor",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ExtraSpecs: json.RawMessage(`{"test": 1}`),
+ GitHubRunnerGroup: "test-group",
+ }
+
+ var orgScaleSet params.ScaleSet
+ var repoScaleSet params.ScaleSet
+ var enterpriseScaleSet params.ScaleSet
+ var err error
+
+ s.T().Run("create org scaleset", func(_ *testing.T) {
+ orgScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.orgEntity, createScaleSetPrams)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSet)
+ s.Require().Equal(orgScaleSet.Name, createScaleSetPrams.Name)
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create repo scaleset", func(_ *testing.T) {
+ repoScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.repoEntity, createScaleSetPrams)
+ s.Require().NoError(err)
+ s.Require().NotNil(repoScaleSet)
+ s.Require().Equal(repoScaleSet.Name, createScaleSetPrams.Name)
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, repoScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create enterprise scaleset", func(_ *testing.T) {
+ enterpriseScaleSet, err = s.Store.CreateEntityScaleSet(s.adminCtx, s.enterpriseEntity, createScaleSetPrams)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterpriseScaleSet)
+ s.Require().Equal(enterpriseScaleSet.Name, createScaleSetPrams.Name)
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteScaleSetByID(s.adminCtx, enterpriseScaleSet.ID)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("create list all scalesets", func(_ *testing.T) {
+ allScaleSets, err := s.Store.ListAllScaleSets(s.adminCtx)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(allScaleSets)
+ s.Require().Len(allScaleSets, 3)
+ })
+
+ s.T().Run("list repo scalesets", func(_ *testing.T) {
+ repoScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.repoEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(repoScaleSets)
+ s.Require().Len(repoScaleSets, 1)
+ })
+
+ s.T().Run("list org scalesets", func(_ *testing.T) {
+ orgScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.orgEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(orgScaleSets)
+ s.Require().Len(orgScaleSets, 1)
+ })
+
+ s.T().Run("list enterprise scalesets", func(_ *testing.T) {
+ enterpriseScaleSets, err := s.Store.ListEntityScaleSets(s.adminCtx, s.enterpriseEntity)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(enterpriseScaleSets)
+ s.Require().Len(enterpriseScaleSets, 1)
+ })
+
+ s.T().Run("get repo scaleset by ID", func(_ *testing.T) {
+ repoScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, repoScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(repoScaleSetByID)
+ s.Require().Equal(repoScaleSetByID.ID, repoScaleSet.ID)
+ s.Require().Equal(repoScaleSetByID.Name, repoScaleSet.Name)
+ })
+
+ s.T().Run("get org scaleset by ID", func(_ *testing.T) {
+ orgScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSetByID)
+ s.Require().Equal(orgScaleSetByID.ID, orgScaleSet.ID)
+ s.Require().Equal(orgScaleSetByID.Name, orgScaleSet.Name)
+ })
+
+ s.T().Run("get enterprise scaleset by ID", func(_ *testing.T) {
+ enterpriseScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, enterpriseScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(enterpriseScaleSetByID)
+ s.Require().Equal(enterpriseScaleSetByID.ID, enterpriseScaleSet.ID)
+ s.Require().Equal(enterpriseScaleSetByID.Name, enterpriseScaleSet.Name)
+ })
+
+ s.T().Run("get scaleset by ID not found", func(_ *testing.T) {
+ _, err = s.Store.GetScaleSetByID(s.adminCtx, 999)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "not found")
+ })
+
+ s.T().Run("Set scale set last message ID and desired count", func(_ *testing.T) {
+ err = s.Store.SetScaleSetLastMessageID(s.adminCtx, orgScaleSet.ID, 20)
+ s.Require().NoError(err)
+ err = s.Store.SetScaleSetDesiredRunnerCount(s.adminCtx, orgScaleSet.ID, 5)
+ s.Require().NoError(err)
+ orgScaleSetByID, err := s.Store.GetScaleSetByID(s.adminCtx, orgScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(orgScaleSetByID)
+ s.Require().Equal(orgScaleSetByID.LastMessageID, int64(20))
+ s.Require().Equal(orgScaleSetByID.DesiredRunnerCount, 5)
+ })
+
+ updateParams := params.UpdateScaleSetParams{
+ Name: "test-scaleset-updated",
+ RunnerPrefix: params.RunnerPrefix{
+ Prefix: "test-prefix2",
+ },
+ OSType: commonParams.Windows,
+ OSArch: commonParams.Arm64,
+ ExtraSpecs: json.RawMessage(`{"test": 111}`),
+ Enabled: garmTesting.Ptr(true),
+ MaxRunners: garmTesting.Ptr(uint(60)),
+ MinIdleRunners: garmTesting.Ptr(uint(50)),
+ Image: "new-test-image",
+ Flavor: "new-test-flavor",
+ GitHubRunnerGroup: garmTesting.Ptr("new-test-group"),
+ }
+
+ s.T().Run("update repo scaleset", func(_ *testing.T) {
+ newRepoScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.repoEntity, repoScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newRepoScaleSet)
+ s.Require().NoError(s.callback(repoScaleSet, newRepoScaleSet))
+ })
+
+ s.T().Run("update org scaleset", func(_ *testing.T) {
+ newOrgScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.orgEntity, orgScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newOrgScaleSet)
+ s.Require().NoError(s.callback(orgScaleSet, newOrgScaleSet))
+ })
+
+ s.T().Run("update enterprise scaleset", func(_ *testing.T) {
+ newEnterpriseScaleSet, err := s.Store.UpdateEntityScaleSet(s.adminCtx, s.enterpriseEntity, enterpriseScaleSet.ID, updateParams, s.callback)
+ s.Require().NoError(err)
+ s.Require().NotNil(newEnterpriseScaleSet)
+ s.Require().NoError(s.callback(enterpriseScaleSet, newEnterpriseScaleSet))
+ })
+
+ s.T().Run("update scaleset not found", func(_ *testing.T) {
+ _, err = s.Store.UpdateEntityScaleSet(s.adminCtx, s.enterpriseEntity, 99999, updateParams, s.callback)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "not found")
+ })
+
+ s.T().Run("update scaleset with invalid entity", func(_ *testing.T) {
+ _, err = s.Store.UpdateEntityScaleSet(s.adminCtx, params.ForgeEntity{}, enterpriseScaleSet.ID, params.UpdateScaleSetParams{}, nil)
+ s.Require().Error(err)
+ s.Require().Contains(err.Error(), "missing entity id")
+ })
+
+ s.T().Run("Create repo scale set instance", func(_ *testing.T) {
+ param := params.CreateInstanceParams{
+ Name: "test-instance",
+ Status: commonParams.InstancePendingCreate,
+ RunnerStatus: params.RunnerPending,
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ CallbackURL: "http://localhost:8080/callback",
+ MetadataURL: "http://localhost:8080/metadata",
+ GitHubRunnerGroup: "test-group",
+ JitConfiguration: map[string]string{
+ "test": "test",
+ },
+ AgentID: 5,
+ }
+
+ instance, err := s.Store.CreateScaleSetInstance(s.adminCtx, repoScaleSet.ID, param)
+ s.Require().NoError(err)
+ s.Require().NotNil(instance)
+ s.Require().Equal(instance.Name, param.Name)
+ s.Require().Equal(instance.Status, param.Status)
+ s.Require().Equal(instance.RunnerStatus, param.RunnerStatus)
+ s.Require().Equal(instance.OSType, param.OSType)
+ s.Require().Equal(instance.OSArch, param.OSArch)
+ s.Require().Equal(instance.CallbackURL, param.CallbackURL)
+ s.Require().Equal(instance.MetadataURL, param.MetadataURL)
+ s.Require().Equal(instance.GitHubRunnerGroup, param.GitHubRunnerGroup)
+ s.Require().Equal(instance.JitConfiguration, param.JitConfiguration)
+ s.Require().Equal(instance.AgentID, param.AgentID)
+
+ s.T().Cleanup(func() {
+ err := s.Store.DeleteInstanceByName(s.adminCtx, instance.Name)
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to delete scaleset instance: %s", err))
+ }
+ })
+ })
+
+ s.T().Run("List repo scale set instances", func(_ *testing.T) {
+ instances, err := s.Store.ListScaleSetInstances(s.adminCtx, repoScaleSet.ID)
+ s.Require().NoError(err)
+ s.Require().NotEmpty(instances)
+ s.Require().Len(instances, 1)
+ })
+}
+
+func TestScaleSetsTestSuite(t *testing.T) {
+ suite.Run(t, new(ScaleSetsTestSuite))
+}
diff --git a/database/sql/sql.go b/database/sql/sql.go
index 6a003be1..7d1fc96c 100644
--- a/database/sql/sql.go
+++ b/database/sql/sql.go
@@ -16,23 +16,37 @@ package sql
import (
"context"
- "log"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/url"
+ "strings"
- "github.com/pkg/errors"
"gorm.io/driver/mysql"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
+)
+
+const (
+ repositoryFieldName string = "Repository"
+ organizationFieldName string = "Organization"
+ enterpriseFieldName string = "Enterprise"
)
// newDBConn returns a new gorm db connection, given the config
func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
dbType, connURI, err := dbCfg.GormParams()
if err != nil {
- return nil, errors.Wrap(err, "getting DB URI string")
+ return nil, fmt.Errorf("error getting DB URI string: %w", err)
}
gormConfig := &gorm.Config{}
@@ -47,7 +61,7 @@ func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
conn, err = gorm.Open(sqlite.Open(connURI), gormConfig)
}
if err != nil {
- return nil, errors.Wrap(err, "connecting to database")
+ return nil, fmt.Errorf("error connecting to database: %w", err)
}
if dbCfg.Debug {
@@ -59,52 +73,426 @@ func newDBConn(dbCfg config.Database) (conn *gorm.DB, err error) {
func NewSQLDatabase(ctx context.Context, cfg config.Database) (common.Store, error) {
conn, err := newDBConn(cfg)
if err != nil {
- return nil, errors.Wrap(err, "creating DB connection")
+ return nil, fmt.Errorf("error creating DB connection: %w", err)
+ }
+ producer, err := watcher.RegisterProducer(ctx, "sql")
+ if err != nil {
+ return nil, fmt.Errorf("error registering producer: %w", err)
}
db := &sqlDatabase{
- conn: conn,
- ctx: ctx,
- cfg: cfg,
+ conn: conn,
+ ctx: ctx,
+ cfg: cfg,
+ producer: producer,
}
if err := db.migrateDB(); err != nil {
- return nil, errors.Wrap(err, "migrating database")
+ return nil, fmt.Errorf("error migrating database: %w", err)
}
return db, nil
}
type sqlDatabase struct {
- conn *gorm.DB
- ctx context.Context
- cfg config.Database
+ conn *gorm.DB
+ ctx context.Context
+ cfg config.Database
+ producer common.Producer
+}
+
+var renameTemplate = `
+PRAGMA foreign_keys = OFF;
+BEGIN TRANSACTION;
+
+ALTER TABLE %s RENAME TO %s_old;
+COMMIT;
+`
+
+var restoreNameTemplate = `
+PRAGMA foreign_keys = OFF;
+BEGIN TRANSACTION;
+DROP TABLE IF EXISTS %s;
+ALTER TABLE %s_old RENAME TO %s;
+COMMIT;
+`
+
+var copyContentsTemplate = `
+PRAGMA foreign_keys = OFF;
+BEGIN TRANSACTION;
+INSERT INTO %s SELECT * FROM %s_old;
+DROP TABLE %s_old;
+
+COMMIT;
+`
+
+func (s *sqlDatabase) cascadeMigrationSQLite(model interface{}, name string, justDrop bool) error {
+ if !s.conn.Migrator().HasTable(name) {
+ return nil
+ }
+ defer s.conn.Exec("PRAGMA foreign_keys = ON;")
+
+ var data string
+ var indexes []string
+ if err := s.conn.Raw(fmt.Sprintf("select sql from sqlite_master where type='table' and tbl_name='%s'", name)).Scan(&data).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("failed to get table %s: %w", name, err)
+ }
+ }
+
+ if err := s.conn.Raw(fmt.Sprintf("SELECT name FROM sqlite_master WHERE type == 'index' AND tbl_name == '%s' and name not like 'sqlite_%%'", name)).Scan(&indexes).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("failed to get table indexes %s: %w", name, err)
+ }
+ }
+
+ if strings.Contains(data, "ON DELETE") {
+ return nil
+ }
+
+ if justDrop {
+ if err := s.conn.Migrator().DropTable(model); err != nil {
+ return fmt.Errorf("failed to drop table %s: %w", name, err)
+ }
+ return nil
+ }
+
+ for _, index := range indexes {
+ if err := s.conn.Migrator().DropIndex(model, index); err != nil {
+ return fmt.Errorf("failed to drop index %s: %w", index, err)
+ }
+ }
+
+ err := s.conn.Exec(fmt.Sprintf(renameTemplate, name, name)).Error
+ if err != nil {
+ return fmt.Errorf("failed to rename table %s: %w", name, err)
+ }
+
+ if model != nil {
+ if err := s.conn.Migrator().AutoMigrate(model); err != nil {
+ if err := s.conn.Exec(fmt.Sprintf(restoreNameTemplate, name, name, name)).Error; err != nil {
+ slog.With(slog.Any("error", err)).Error("failed to restore table", "table", name)
+ }
+ return fmt.Errorf("failed to create table %s: %w", name, err)
+ }
+ }
+ err = s.conn.Exec(fmt.Sprintf(copyContentsTemplate, name, name, name)).Error
+ if err != nil {
+ return fmt.Errorf("failed to copy contents to table %s: %w", name, err)
+ }
+
+ return nil
+}
+
+func (s *sqlDatabase) cascadeMigration() error {
+ switch s.cfg.DbBackend {
+ case config.SQLiteBackend:
+ if err := s.cascadeMigrationSQLite(&Address{}, "addresses", true); err != nil {
+ return fmt.Errorf("failed to drop table addresses: %w", err)
+ }
+
+ if err := s.cascadeMigrationSQLite(&InstanceStatusUpdate{}, "instance_status_updates", true); err != nil {
+ return fmt.Errorf("failed to drop table instance_status_updates: %w", err)
+ }
+
+ if err := s.cascadeMigrationSQLite(&Tag{}, "pool_tags", false); err != nil {
+ return fmt.Errorf("failed to migrate pool_tags: %w", err)
+ }
+
+ if err := s.cascadeMigrationSQLite(&WorkflowJob{}, "workflow_jobs", false); err != nil {
+ return fmt.Errorf("failed to migrate workflow_jobs: %w", err)
+ }
+ case config.MySQLBackend:
+ return nil
+ default:
+ return fmt.Errorf("invalid db backend: %s", s.cfg.DbBackend)
+ }
+ return nil
+}
+
+func (s *sqlDatabase) ensureGithubEndpoint() error {
+ // Create the default Github endpoint.
+ createEndpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "The github.com endpoint",
+ APIBaseURL: appdefaults.GithubDefaultBaseURL,
+ BaseURL: appdefaults.DefaultGithubURL,
+ UploadBaseURL: appdefaults.GithubDefaultUploadBaseURL,
+ }
+
+ var epCount int64
+ if err := s.conn.Model(&GithubEndpoint{}).Count(&epCount).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error counting github endpoints: %w", err)
+ }
+ }
+
+ if epCount == 0 {
+ if _, err := s.CreateGithubEndpoint(context.Background(), createEndpointParams); err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ return fmt.Errorf("error creating default github endpoint: %w", err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *sqlDatabase) migrateCredentialsToDB() (err error) {
+ s.conn.Exec("PRAGMA foreign_keys = OFF")
+ defer s.conn.Exec("PRAGMA foreign_keys = ON")
+
+ adminUser, err := s.GetAdminUser(s.ctx)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ // Admin user doesn't exist. This is a new deploy. Nothing to migrate.
+ return nil
+ }
+ return fmt.Errorf("error getting admin user: %w", err)
+ }
+
+ // Impersonate the admin user. We're migrating from config credentials to
+ // database credentials. At this point, there is no other user than the admin
+ // user. GARM is not yet multi-user, so it's safe to assume we only have this
+ // one user.
+ adminCtx := context.Background()
+ adminCtx = auth.PopulateContext(adminCtx, adminUser, nil)
+
+ slog.Info("migrating credentials to DB")
+ slog.Info("creating github endpoints table")
+ if err := s.conn.AutoMigrate(&GithubEndpoint{}); err != nil {
+ return fmt.Errorf("error migrating github endpoints: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error("rolling back github endpoints table")
+ s.conn.Migrator().DropTable(&GithubEndpoint{})
+ }
+ }()
+
+ slog.Info("creating github credentials table")
+ if err := s.conn.AutoMigrate(&GithubCredentials{}); err != nil {
+ return fmt.Errorf("error migrating github credentials: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ slog.With(slog.Any("error", err)).Error("rolling back github credentials table")
+ s.conn.Migrator().DropTable(&GithubCredentials{})
+ }
+ }()
+
+ // Nothing to migrate.
+ if len(s.cfg.MigrateCredentials) == 0 {
+ return nil
+ }
+
+ slog.Info("importing credentials from config")
+ for _, cred := range s.cfg.MigrateCredentials {
+ slog.Info("importing credential", "name", cred.Name)
+ parsed, err := url.Parse(cred.BaseEndpoint())
+ if err != nil {
+ return fmt.Errorf("error parsing base URL: %w", err)
+ }
+
+ certBundle, err := cred.CACertBundle()
+ if err != nil {
+ return fmt.Errorf("error getting CA cert bundle: %w", err)
+ }
+ hostname := parsed.Hostname()
+ createParams := params.CreateGithubEndpointParams{
+ Name: hostname,
+ Description: fmt.Sprintf("Endpoint for %s", hostname),
+ APIBaseURL: cred.APIEndpoint(),
+ BaseURL: cred.BaseEndpoint(),
+ UploadBaseURL: cred.UploadEndpoint(),
+ CACertBundle: certBundle,
+ }
+
+ var endpoint params.ForgeEndpoint
+ endpoint, err = s.GetGithubEndpoint(adminCtx, hostname)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error getting github endpoint: %w", err)
+ }
+ endpoint, err = s.CreateGithubEndpoint(adminCtx, createParams)
+ if err != nil {
+ return fmt.Errorf("error creating default github endpoint: %w", err)
+ }
+ }
+
+ credParams := params.CreateGithubCredentialsParams{
+ Name: cred.Name,
+ Description: cred.Description,
+ Endpoint: endpoint.Name,
+ AuthType: params.ForgeAuthType(cred.GetAuthType()),
+ }
+ switch credParams.AuthType {
+ case params.ForgeAuthTypeApp:
+ keyBytes, err := cred.App.PrivateKeyBytes()
+ if err != nil {
+ return fmt.Errorf("error getting private key bytes: %w", err)
+ }
+ credParams.App = params.GithubApp{
+ AppID: cred.App.AppID,
+ InstallationID: cred.App.InstallationID,
+ PrivateKeyBytes: keyBytes,
+ }
+
+ if err := credParams.App.Validate(); err != nil {
+ return fmt.Errorf("error validating app credentials: %w", err)
+ }
+ case params.ForgeAuthTypePAT:
+ token := cred.PAT.OAuth2Token
+ if token == "" {
+ token = cred.OAuth2Token
+ }
+ if token == "" {
+ return errors.New("missing OAuth2 token")
+ }
+ credParams.PAT = params.GithubPAT{
+ OAuth2Token: token,
+ }
+ }
+
+ creds, err := s.CreateGithubCredentials(adminCtx, credParams)
+ if err != nil {
+ return fmt.Errorf("error creating github credentials: %w", err)
+ }
+
+ if err := s.conn.Exec("update repositories set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating repositories: %w", err)
+ }
+
+ if err := s.conn.Exec("update organizations set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating organizations: %w", err)
+ }
+
+ if err := s.conn.Exec("update enterprises set credentials_id = ?,endpoint_name = ? where credentials_name = ?", creds.ID, creds.Endpoint.Name, creds.Name).Error; err != nil {
+ return fmt.Errorf("error updating enterprises: %w", err)
+ }
+ }
+ return nil
+}
+
+func (s *sqlDatabase) migrateWorkflow() error {
+ if s.conn.Migrator().HasTable(&WorkflowJob{}) {
+ if s.conn.Migrator().HasColumn(&WorkflowJob{}, "runner_name") {
+ // Remove jobs that are not in "queued" status. We really only care about queued jobs. Once they transition
+ // to something else, we don't really consume them anyway.
+ if err := s.conn.Exec("delete from workflow_jobs where status is not 'queued'").Error; err != nil {
+ return fmt.Errorf("error updating workflow_jobs: %w", err)
+ }
+ if err := s.conn.Migrator().DropColumn(&WorkflowJob{}, "runner_name"); err != nil {
+ return fmt.Errorf("error updating workflow_jobs: %w", err)
+ }
+ }
+ }
+ return nil
}
func (s *sqlDatabase) migrateDB() error {
if s.conn.Migrator().HasIndex(&Organization{}, "idx_organizations_name") {
if err := s.conn.Migrator().DropIndex(&Organization{}, "idx_organizations_name"); err != nil {
- log.Printf("failed to drop index idx_organizations_name: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to drop index idx_organizations_name")
}
}
if s.conn.Migrator().HasIndex(&Repository{}, "idx_owner") {
if err := s.conn.Migrator().DropIndex(&Repository{}, "idx_owner"); err != nil {
- log.Printf("failed to drop index idx_owner: %s", err)
+ slog.With(slog.Any("error", err)).Error("failed to drop index idx_owner")
}
}
+
+ if err := s.cascadeMigration(); err != nil {
+ return fmt.Errorf("error running cascade migration: %w", err)
+ }
+
+ if s.conn.Migrator().HasTable(&Pool{}) {
+ if err := s.conn.Exec("update pools set repo_id=NULL where repo_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
+ return fmt.Errorf("error updating pools: %w", err)
+ }
+
+ if err := s.conn.Exec("update pools set org_id=NULL where org_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
+ return fmt.Errorf("error updating pools: %w", err)
+ }
+
+ if err := s.conn.Exec("update pools set enterprise_id=NULL where enterprise_id='00000000-0000-0000-0000-000000000000'").Error; err != nil {
+ return fmt.Errorf("error updating pools: %w", err)
+ }
+ }
+
+ if err := s.migrateWorkflow(); err != nil {
+ return fmt.Errorf("error migrating workflows: %w", err)
+ }
+
+ if s.conn.Migrator().HasTable(&GithubEndpoint{}) {
+ if !s.conn.Migrator().HasColumn(&GithubEndpoint{}, "endpoint_type") {
+ if err := s.conn.Migrator().AutoMigrate(&GithubEndpoint{}); err != nil {
+ return fmt.Errorf("error migrating github endpoints: %w", err)
+ }
+ if err := s.conn.Exec("update github_endpoints set endpoint_type = 'github' where endpoint_type is null").Error; err != nil {
+ return fmt.Errorf("error updating github endpoints: %w", err)
+ }
+ }
+ }
+
+ var needsCredentialMigration bool
+ if !s.conn.Migrator().HasTable(&GithubCredentials{}) || !s.conn.Migrator().HasTable(&GithubEndpoint{}) {
+ needsCredentialMigration = true
+ }
+
+ var hasMinAgeField bool
+ if s.conn.Migrator().HasTable(&ControllerInfo{}) && s.conn.Migrator().HasColumn(&ControllerInfo{}, "minimum_job_age_backoff") {
+ hasMinAgeField = true
+ }
+
+ s.conn.Exec("PRAGMA foreign_keys = OFF")
if err := s.conn.AutoMigrate(
+ &User{},
+ &GithubEndpoint{},
+ &GithubCredentials{},
+ &GiteaCredentials{},
&Tag{},
&Pool{},
&Repository{},
&Organization{},
&Enterprise{},
+ &EnterpriseEvent{},
+ &OrganizationEvent{},
+ &RepositoryEvent{},
&Address{},
&InstanceStatusUpdate{},
&Instance{},
&ControllerInfo{},
- &User{},
+ &WorkflowJob{},
+ &ScaleSet{},
); err != nil {
- return errors.Wrap(err, "running auto migrate")
+ return fmt.Errorf("error running auto migrate: %w", err)
+ }
+ s.conn.Exec("PRAGMA foreign_keys = ON")
+
+ if !hasMinAgeField {
+ var controller ControllerInfo
+ if err := s.conn.First(&controller).Error; err != nil {
+ if !errors.Is(err, gorm.ErrRecordNotFound) {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ } else {
+ controller.MinimumJobAgeBackoff = 30
+ if err := s.conn.Save(&controller).Error; err != nil {
+ return fmt.Errorf("error updating controller info: %w", err)
+ }
+ }
}
+ if err := s.ensureGithubEndpoint(); err != nil {
+ return fmt.Errorf("error ensuring github endpoint: %w", err)
+ }
+
+ if needsCredentialMigration {
+ if err := s.migrateCredentialsToDB(); err != nil {
+ return fmt.Errorf("error migrating credentials: %w", err)
+ }
+ }
return nil
}
diff --git a/database/sql/users.go b/database/sql/users.go
index 5e40c5cb..ca78c5e8 100644
--- a/database/sql/users.go
+++ b/database/sql/users.go
@@ -16,17 +16,17 @@ package sql
import (
"context"
+ "errors"
"fmt"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/pkg/errors"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) getUserByUsernameOrEmail(user string) (User, error) {
+func (s *sqlDatabase) getUserByUsernameOrEmail(tx *gorm.DB, user string) (User, error) {
field := "username"
if util.IsValidEmail(user) {
field = "email"
@@ -34,39 +34,32 @@ func (s *sqlDatabase) getUserByUsernameOrEmail(user string) (User, error) {
query := fmt.Sprintf("%s = ?", field)
var dbUser User
- q := s.conn.Model(&User{}).Where(query, user).First(&dbUser)
+ q := tx.Model(&User{}).Where(query, user).First(&dbUser)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return User{}, runnerErrors.ErrNotFound
}
- return User{}, errors.Wrap(q.Error, "fetching user")
+ return User{}, fmt.Errorf("error fetching user: %w", q.Error)
}
return dbUser, nil
}
-func (s *sqlDatabase) getUserByID(userID string) (User, error) {
+func (s *sqlDatabase) getUserByID(tx *gorm.DB, userID string) (User, error) {
var dbUser User
- q := s.conn.Model(&User{}).Where("id = ?", userID).First(&dbUser)
+ q := tx.Model(&User{}).Where("id = ?", userID).First(&dbUser)
if q.Error != nil {
if errors.Is(q.Error, gorm.ErrRecordNotFound) {
return User{}, runnerErrors.ErrNotFound
}
- return User{}, errors.Wrap(q.Error, "fetching user")
+ return User{}, fmt.Errorf("error fetching user: %w", q.Error)
}
return dbUser, nil
}
-func (s *sqlDatabase) CreateUser(ctx context.Context, user params.NewUserParams) (params.User, error) {
- if user.Username == "" || user.Email == "" {
- return params.User{}, runnerErrors.NewBadRequestError("missing username or email")
+func (s *sqlDatabase) CreateUser(_ context.Context, user params.NewUserParams) (params.User, error) {
+ if user.Username == "" || user.Email == "" || user.Password == "" {
+ return params.User{}, runnerErrors.NewBadRequestError("missing username, password or email")
}
- if _, err := s.getUserByUsernameOrEmail(user.Username); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, runnerErrors.NewConflictError("username already exists")
- }
- if _, err := s.getUserByUsernameOrEmail(user.Email); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.User{}, runnerErrors.NewConflictError("email already exists")
- }
-
newUser := User{
Username: user.Username,
Password: user.Password,
@@ -75,57 +68,98 @@ func (s *sqlDatabase) CreateUser(ctx context.Context, user params.NewUserParams)
Email: user.Email,
IsAdmin: user.IsAdmin,
}
+ err := s.conn.Transaction(func(tx *gorm.DB) error {
+ if _, err := s.getUserByUsernameOrEmail(tx, user.Username); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
+ return runnerErrors.NewConflictError("username already exists")
+ }
+ if _, err := s.getUserByUsernameOrEmail(tx, user.Email); err == nil || !errors.Is(err, runnerErrors.ErrNotFound) {
+ return runnerErrors.NewConflictError("email already exists")
+ }
- q := s.conn.Save(&newUser)
- if q.Error != nil {
- return params.User{}, errors.Wrap(q.Error, "creating user")
+ if s.hasAdmin(tx) && user.IsAdmin {
+ return runnerErrors.NewBadRequestError("admin user already exists")
+ }
+
+ q := tx.Save(&newUser)
+ if q.Error != nil {
+ return fmt.Errorf("error creating user: %w", q.Error)
+ }
+ return nil
+ })
+ if err != nil {
+ return params.User{}, fmt.Errorf("error creating user: %w", err)
}
return s.sqlToParamsUser(newUser), nil
}
-func (s *sqlDatabase) HasAdminUser(ctx context.Context) bool {
+func (s *sqlDatabase) hasAdmin(tx *gorm.DB) bool {
var user User
- q := s.conn.Model(&User{}).Where("is_admin = ?", true).First(&user)
+ q := tx.Model(&User{}).Where("is_admin = ?", true).First(&user)
return q.Error == nil
}
-func (s *sqlDatabase) GetUser(ctx context.Context, user string) (params.User, error) {
- dbUser, err := s.getUserByUsernameOrEmail(user)
+func (s *sqlDatabase) HasAdminUser(_ context.Context) bool {
+ return s.hasAdmin(s.conn)
+}
+
+func (s *sqlDatabase) GetUser(_ context.Context, user string) (params.User, error) {
+ dbUser, err := s.getUserByUsernameOrEmail(s.conn, user)
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error fetching user: %w", err)
}
return s.sqlToParamsUser(dbUser), nil
}
-func (s *sqlDatabase) GetUserByID(ctx context.Context, userID string) (params.User, error) {
- dbUser, err := s.getUserByID(userID)
+func (s *sqlDatabase) GetUserByID(_ context.Context, userID string) (params.User, error) {
+ dbUser, err := s.getUserByID(s.conn, userID)
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error fetching user: %w", err)
}
return s.sqlToParamsUser(dbUser), nil
}
-func (s *sqlDatabase) UpdateUser(ctx context.Context, user string, param params.UpdateUserParams) (params.User, error) {
- dbUser, err := s.getUserByUsernameOrEmail(user)
+func (s *sqlDatabase) UpdateUser(_ context.Context, user string, param params.UpdateUserParams) (params.User, error) {
+ var err error
+ var dbUser User
+ err = s.conn.Transaction(func(tx *gorm.DB) error {
+ dbUser, err = s.getUserByUsernameOrEmail(tx, user)
+ if err != nil {
+ return fmt.Errorf("error fetching user: %w", err)
+ }
+
+ if param.FullName != "" {
+ dbUser.FullName = param.FullName
+ }
+
+ if param.Enabled != nil {
+ dbUser.Enabled = *param.Enabled
+ }
+
+ if param.Password != "" {
+ dbUser.Password = param.Password
+ dbUser.Generation++
+ }
+
+ if q := tx.Save(&dbUser); q.Error != nil {
+ return fmt.Errorf("error saving user: %w", q.Error)
+ }
+ return nil
+ })
if err != nil {
- return params.User{}, errors.Wrap(err, "fetching user")
+ return params.User{}, fmt.Errorf("error updating user: %w", err)
}
-
- if param.FullName != "" {
- dbUser.FullName = param.FullName
- }
-
- if param.Enabled != nil {
- dbUser.Enabled = *param.Enabled
- }
-
- if param.Password != "" {
- dbUser.Password = param.Password
- }
-
- if q := s.conn.Save(&dbUser); q.Error != nil {
- return params.User{}, errors.Wrap(q.Error, "saving user")
- }
-
return s.sqlToParamsUser(dbUser), nil
}
+
+// GetAdminUser returns the system admin user. This is only for internal use.
+func (s *sqlDatabase) GetAdminUser(_ context.Context) (params.User, error) {
+ var user User
+ q := s.conn.Model(&User{}).Where("is_admin = ?", true).First(&user)
+ if q.Error != nil {
+ if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+ return params.User{}, runnerErrors.ErrNotFound
+ }
+ return params.User{}, fmt.Errorf("error fetching admin user: %w", q.Error)
+ }
+ return s.sqlToParamsUser(user), nil
+}
diff --git a/database/sql/users_test.go b/database/sql/users_test.go
index 37105cb6..369abff3 100644
--- a/database/sql/users_test.go
+++ b/database/sql/users_test.go
@@ -21,14 +21,16 @@ import (
"regexp"
"testing"
- dbCommon "github.com/cloudbase/garm/database/common"
- garmTesting "github.com/cloudbase/garm/internal/testing"
- "github.com/cloudbase/garm/params"
"github.com/stretchr/testify/suite"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/logger"
+
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
)
type UserTestFixtures struct {
@@ -52,7 +54,13 @@ func (s *UserTestSuite) assertSQLMockExpectations() {
}
}
+func (s *UserTestSuite) TearDownTest() {
+ watcher.CloseWatcher()
+}
+
func (s *UserTestSuite) SetupTest() {
+ ctx := context.Background()
+ watcher.InitWatcher(ctx)
// create testing sqlite database
db, err := NewSQLDatabase(context.Background(), garmTesting.GetTestSqliteDBConfig(s.T()))
if err != nil {
@@ -90,7 +98,7 @@ func (s *UserTestSuite) SetupTest() {
SkipInitializeWithVersion: true,
}
gormConfig := &gorm.Config{}
- if flag.Lookup("test.v").Value.String() == "false" {
+ if flag.Lookup("test.v").Value.String() == falseString {
gormConfig.Logger = logger.Default.LogMode(logger.Silent)
}
gormConn, err := gorm.Open(mysql.New(mysqlConfig), gormConfig)
@@ -144,7 +152,7 @@ func (s *UserTestSuite) TestCreateUserMissingUsernameEmail() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("missing username or email"), err.Error())
+ s.Require().Equal(("missing username, password or email"), err.Error())
}
func (s *UserTestSuite) TestCreateUserUsernameAlreadyExist() {
@@ -153,7 +161,7 @@ func (s *UserTestSuite) TestCreateUserUsernameAlreadyExist() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("username already exists"), err.Error())
+ s.Require().Equal(("error creating user: username already exists"), err.Error())
}
func (s *UserTestSuite) TestCreateUserEmailAlreadyExist() {
@@ -162,19 +170,19 @@ func (s *UserTestSuite) TestCreateUserEmailAlreadyExist() {
_, err := s.Store.CreateUser(context.Background(), s.Fixtures.NewUserParams)
s.Require().NotNil(err)
- s.Require().Equal(("email already exists"), err.Error())
+ s.Require().Equal(("error creating user: email already exists"), err.Error())
}
func (s *UserTestSuite) TestCreateUserDBCreateErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.NewUserParams.Username).
- WillReturnRows(sqlmock.NewRows([]string{"id"}))
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.NewUserParams.Email).
- WillReturnRows(sqlmock.NewRows([]string{"id"}))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.NewUserParams.Username, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}))
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.NewUserParams.Email, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}))
s.Fixtures.SQLMock.
ExpectExec("INSERT INTO `users`").
WillReturnError(fmt.Errorf("creating user mock error"))
@@ -182,9 +190,9 @@ func (s *UserTestSuite) TestCreateUserDBCreateErr() {
_, err := s.StoreSQLMocked.CreateUser(context.Background(), s.Fixtures.NewUserParams)
- s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("creating user: creating user mock error", err.Error())
+ s.Require().Equal("error creating user: error creating user: creating user mock error", err.Error())
+ s.assertSQLMockExpectations()
}
func (s *UserTestSuite) TestHasAdminUserNoAdmin() {
@@ -222,7 +230,7 @@ func (s *UserTestSuite) TestGetUserNotFound() {
_, err := s.Store.GetUser(context.Background(), "dummy-user")
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestGetUserByID() {
@@ -236,7 +244,7 @@ func (s *UserTestSuite) TestGetUserByIDNotFound() {
_, err := s.Store.GetUserByID(context.Background(), "dummy-user-id")
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestUpdateUser() {
@@ -252,15 +260,15 @@ func (s *UserTestSuite) TestUpdateUserNotFound() {
_, err := s.Store.UpdateUser(context.Background(), "dummy-user", s.Fixtures.UpdateUserParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching user: not found", err.Error())
+ s.Require().Equal("error updating user: error fetching user: not found", err.Error())
}
func (s *UserTestSuite) TestUpdateUserDBSaveErr() {
- s.Fixtures.SQLMock.
- ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
- WithArgs(s.Fixtures.Users[0].ID).
- WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Users[0].ID))
s.Fixtures.SQLMock.ExpectBegin()
+ s.Fixtures.SQLMock.
+ ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+ WithArgs(s.Fixtures.Users[0].ID, 1).
+ WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Users[0].ID))
s.Fixtures.SQLMock.
ExpectExec(("UPDATE `users` SET")).
WillReturnError(fmt.Errorf("saving user mock error"))
@@ -270,7 +278,7 @@ func (s *UserTestSuite) TestUpdateUserDBSaveErr() {
s.assertSQLMockExpectations()
s.Require().NotNil(err)
- s.Require().Equal("saving user: saving user mock error", err.Error())
+ s.Require().Equal("error updating user: error saving user: saving user mock error", err.Error())
}
func TestUserTestSuite(t *testing.T) {
diff --git a/database/sql/util.go b/database/sql/util.go
index 31aa8ba3..9509aacf 100644
--- a/database/sql/util.go
+++ b/database/sql/util.go
@@ -15,23 +15,42 @@
package sql
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/util"
-
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
"gorm.io/datatypes"
"gorm.io/gorm"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm-provider-common/util"
+ "github.com/cloudbase/garm/auth"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
)
-func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
+func (s *sqlDatabase) sqlToParamsInstance(instance Instance) (params.Instance, error) {
var id string
if instance.ProviderID != nil {
id = *instance.ProviderID
}
+
+ var labels []string
+ if len(instance.AditionalLabels) > 0 {
+ if err := json.Unmarshal(instance.AditionalLabels, &labels); err != nil {
+ return params.Instance{}, fmt.Errorf("error unmarshalling labels: %w", err)
+ }
+ }
+
+ var jitConfig map[string]string
+ if len(instance.JitConfiguration) > 0 {
+ if err := s.unsealAndUnmarshal(instance.JitConfiguration, &jitConfig); err != nil {
+ return params.Instance{}, fmt.Errorf("error unmarshalling jit configuration: %w", err)
+ }
+ }
ret := params.Instance{
ID: instance.ID.String(),
ProviderID: id,
@@ -43,14 +62,42 @@ func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
OSArch: instance.OSArch,
Status: instance.Status,
RunnerStatus: instance.RunnerStatus,
- PoolID: instance.PoolID.String(),
CallbackURL: instance.CallbackURL,
MetadataURL: instance.MetadataURL,
StatusMessages: []params.StatusMessage{},
CreateAttempt: instance.CreateAttempt,
+ CreatedAt: instance.CreatedAt,
UpdatedAt: instance.UpdatedAt,
TokenFetched: instance.TokenFetched,
+ JitConfiguration: jitConfig,
GitHubRunnerGroup: instance.GitHubRunnerGroup,
+ AditionalLabels: labels,
+ }
+
+ if instance.ScaleSetFkID != nil {
+ ret.ScaleSetID = *instance.ScaleSetFkID
+ ret.ProviderName = instance.ScaleSet.ProviderName
+ }
+
+ if instance.PoolID != nil {
+ ret.PoolID = instance.PoolID.String()
+ ret.ProviderName = instance.Pool.ProviderName
+ }
+
+ if ret.ScaleSetID == 0 && ret.PoolID == "" {
+ return params.Instance{}, errors.New("missing pool or scale set id")
+ }
+
+ if ret.ScaleSetID != 0 && ret.PoolID != "" {
+ return params.Instance{}, errors.New("both pool and scale set ids are set")
+ }
+
+ if instance.Job != nil {
+ paramJob, err := sqlWorkflowJobToParamsJob(*instance.Job)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error converting job: %w", err)
+ }
+ ret.Job = ¶mJob
}
if len(instance.ProviderFault) > 0 {
@@ -69,65 +116,153 @@ func (s *sqlDatabase) sqlToParamsInstance(instance Instance) params.Instance {
EventLevel: msg.EventLevel,
})
}
- return ret
+ return ret, nil
}
-func (s *sqlDatabase) sqlAddressToParamsAddress(addr Address) params.Address {
- return params.Address{
+func (s *sqlDatabase) sqlAddressToParamsAddress(addr Address) commonParams.Address {
+ return commonParams.Address{
Address: addr.Address,
- Type: params.AddressType(addr.Type),
+ Type: commonParams.AddressType(addr.Type),
}
}
-func (s *sqlDatabase) sqlToCommonOrganization(org Organization) (params.Organization, error) {
+func (s *sqlDatabase) sqlToCommonOrganization(org Organization, detailed bool) (params.Organization, error) {
if len(org.WebhookSecret) == 0 {
return params.Organization{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(org.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(org.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Organization{}, errors.Wrap(err, "decrypting secret")
+ return params.Organization{}, fmt.Errorf("error decrypting secret: %w", err)
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(org.Endpoint)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
ret := params.Organization{
- ID: org.ID.String(),
- Name: org.Name,
- CredentialsName: org.CredentialsName,
- Pools: make([]params.Pool, len(org.Pools)),
- WebhookSecret: secret,
+ ID: org.ID.String(),
+ Name: org.Name,
+ CredentialsName: org.Credentials.Name,
+ Pools: make([]params.Pool, len(org.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: org.PoolBalancerType,
+ Endpoint: endpoint,
+ CreatedAt: org.CreatedAt,
+ UpdatedAt: org.UpdatedAt,
+ }
+
+ var forgeCreds params.ForgeCredentials
+ if org.CredentialsID != nil {
+ ret.CredentialsID = *org.CredentialsID
+ forgeCreds, err = s.sqlToCommonForgeCredentials(org.Credentials)
+ }
+
+ if org.GiteaCredentialsID != nil {
+ ret.CredentialsID = *org.GiteaCredentialsID
+ forgeCreds, err = s.sqlGiteaToCommonForgeCredentials(org.GiteaCredentials)
+ }
+
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+
+ if len(org.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(org.Events))
+ for idx, event := range org.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ ret.Credentials = forgeCreds
+ ret.CredentialsName = forgeCreds.Name
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range org.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Organization{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) sqlToCommonEnterprise(enterprise Enterprise) (params.Enterprise, error) {
+func (s *sqlDatabase) sqlToCommonEnterprise(enterprise Enterprise, detailed bool) (params.Enterprise, error) {
if len(enterprise.WebhookSecret) == 0 {
return params.Enterprise{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(enterprise.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(enterprise.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "decrypting secret")
+ return params.Enterprise{}, fmt.Errorf("error decrypting secret: %w", err)
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(enterprise.Endpoint)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
ret := params.Enterprise{
- ID: enterprise.ID.String(),
- Name: enterprise.Name,
- CredentialsName: enterprise.CredentialsName,
- Pools: make([]params.Pool, len(enterprise.Pools)),
- WebhookSecret: secret,
+ ID: enterprise.ID.String(),
+ Name: enterprise.Name,
+ CredentialsName: enterprise.Credentials.Name,
+ Pools: make([]params.Pool, len(enterprise.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: enterprise.PoolBalancerType,
+ CreatedAt: enterprise.CreatedAt,
+ UpdatedAt: enterprise.UpdatedAt,
+ Endpoint: endpoint,
+ }
+
+ if enterprise.CredentialsID != nil {
+ ret.CredentialsID = *enterprise.CredentialsID
+ }
+
+ if len(enterprise.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(enterprise.Events))
+ for idx, event := range enterprise.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ creds, err := s.sqlToCommonForgeCredentials(enterprise.Credentials)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+ ret.Credentials = creds
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range enterprise.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Enterprise{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
}
-func (s *sqlDatabase) sqlToCommonPool(pool Pool) params.Pool {
+func (s *sqlDatabase) sqlToCommonPool(pool Pool) (params.Pool, error) {
ret := params.Pool{
ID: pool.ID.String(),
ProviderName: pool.ProviderName,
@@ -146,34 +281,117 @@ func (s *sqlDatabase) sqlToCommonPool(pool Pool) params.Pool {
RunnerBootstrapTimeout: pool.RunnerBootstrapTimeout,
ExtraSpecs: json.RawMessage(pool.ExtraSpecs),
GitHubRunnerGroup: pool.GitHubRunnerGroup,
+ Priority: pool.Priority,
+ CreatedAt: pool.CreatedAt,
+ UpdatedAt: pool.UpdatedAt,
}
- if pool.RepoID != uuid.Nil {
+ var ep GithubEndpoint
+ if pool.RepoID != nil {
ret.RepoID = pool.RepoID.String()
if pool.Repository.Owner != "" && pool.Repository.Name != "" {
ret.RepoName = fmt.Sprintf("%s/%s", pool.Repository.Owner, pool.Repository.Name)
}
+ ep = pool.Repository.Endpoint
}
- if pool.OrgID != uuid.Nil && pool.Organization.Name != "" {
+ if pool.OrgID != nil && pool.Organization.Name != "" {
ret.OrgID = pool.OrgID.String()
ret.OrgName = pool.Organization.Name
+ ep = pool.Organization.Endpoint
}
- if pool.EnterpriseID != uuid.Nil && pool.Enterprise.Name != "" {
+ if pool.EnterpriseID != nil && pool.Enterprise.Name != "" {
ret.EnterpriseID = pool.EnterpriseID.String()
ret.EnterpriseName = pool.Enterprise.Name
+ ep = pool.Enterprise.Endpoint
}
+ endpoint, err := s.sqlToCommonGithubEndpoint(ep)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
+ ret.Endpoint = endpoint
+
for idx, val := range pool.Tags {
ret.Tags[idx] = s.sqlToCommonTags(*val)
}
for idx, inst := range pool.Instances {
- ret.Instances[idx] = s.sqlToParamsInstance(inst)
+ ret.Instances[idx], err = s.sqlToParamsInstance(inst)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error converting instance: %w", err)
+ }
}
- return ret
+ return ret, nil
+}
+
+// sqlToCommonScaleSet converts a ScaleSet database model into the public
+// params.ScaleSet representation. It also resolves the owning entity
+// (repository, organization or enterprise), derives the forge endpoint from
+// that owner, and converts any preloaded instances.
+func (s *sqlDatabase) sqlToCommonScaleSet(scaleSet ScaleSet) (params.ScaleSet, error) {
+	ret := params.ScaleSet{
+		ID:            scaleSet.ID,
+		CreatedAt:     scaleSet.CreatedAt,
+		UpdatedAt:     scaleSet.UpdatedAt,
+		ScaleSetID:    scaleSet.ScaleSetID,
+		Name:          scaleSet.Name,
+		DisableUpdate: scaleSet.DisableUpdate,
+
+		ProviderName:   scaleSet.ProviderName,
+		MaxRunners:     scaleSet.MaxRunners,
+		MinIdleRunners: scaleSet.MinIdleRunners,
+		RunnerPrefix: params.RunnerPrefix{
+			Prefix: scaleSet.RunnerPrefix,
+		},
+		Image:                  scaleSet.Image,
+		Flavor:                 scaleSet.Flavor,
+		OSArch:                 scaleSet.OSArch,
+		OSType:                 scaleSet.OSType,
+		Enabled:                scaleSet.Enabled,
+		Instances:              make([]params.Instance, len(scaleSet.Instances)),
+		RunnerBootstrapTimeout: scaleSet.RunnerBootstrapTimeout,
+		ExtraSpecs:             json.RawMessage(scaleSet.ExtraSpecs),
+		GitHubRunnerGroup:      scaleSet.GitHubRunnerGroup,
+		State:                  scaleSet.State,
+		ExtendedState:          scaleSet.ExtendedState,
+		LastMessageID:          scaleSet.LastMessageID,
+		DesiredRunnerCount:     scaleSet.DesiredRunnerCount,
+	}
+
+	// The owner determines which endpoint the scale set talks to.
+	// NOTE(review): if more than one of RepoID/OrgID/EnterpriseID is set,
+	// the last matching branch wins — confirm callers guarantee exclusivity.
+	var ep GithubEndpoint
+	if scaleSet.RepoID != nil {
+		ret.RepoID = scaleSet.RepoID.String()
+		if scaleSet.Repository.Owner != "" && scaleSet.Repository.Name != "" {
+			ret.RepoName = fmt.Sprintf("%s/%s", scaleSet.Repository.Owner, scaleSet.Repository.Name)
+		}
+		ep = scaleSet.Repository.Endpoint
+	}
+
+	if scaleSet.OrgID != nil {
+		ret.OrgID = scaleSet.OrgID.String()
+		ret.OrgName = scaleSet.Organization.Name
+		ep = scaleSet.Organization.Endpoint
+	}
+
+	if scaleSet.EnterpriseID != nil {
+		ret.EnterpriseID = scaleSet.EnterpriseID.String()
+		ret.EnterpriseName = scaleSet.Enterprise.Name
+		ep = scaleSet.Enterprise.Endpoint
+	}
+
+	endpoint, err := s.sqlToCommonGithubEndpoint(ep)
+	if err != nil {
+		return params.ScaleSet{}, fmt.Errorf("error converting endpoint: %w", err)
+	}
+	ret.Endpoint = endpoint
+
+	// Convert preloaded instances in place; the slice was sized above.
+	for idx, inst := range scaleSet.Instances {
+		ret.Instances[idx], err = s.sqlToParamsInstance(inst)
+		if err != nil {
+			return params.ScaleSet{}, fmt.Errorf("error converting instance: %w", err)
+		}
+	}
+
+	return ret, nil
+}
func (s *sqlDatabase) sqlToCommonTags(tag Tag) params.Tag {
@@ -183,26 +401,77 @@ func (s *sqlDatabase) sqlToCommonTags(tag Tag) params.Tag {
}
}
-func (s *sqlDatabase) sqlToCommonRepository(repo Repository) (params.Repository, error) {
+func (s *sqlDatabase) sqlToCommonRepository(repo Repository, detailed bool) (params.Repository, error) {
if len(repo.WebhookSecret) == 0 {
return params.Repository{}, errors.New("missing secret")
}
- secret, err := util.Aes256DecodeString(repo.WebhookSecret, s.cfg.Passphrase)
+ secret, err := util.Unseal(repo.WebhookSecret, []byte(s.cfg.Passphrase))
if err != nil {
- return params.Repository{}, errors.Wrap(err, "decrypting secret")
+ return params.Repository{}, fmt.Errorf("error decrypting secret: %w", err)
+ }
+ endpoint, err := s.sqlToCommonGithubEndpoint(repo.Endpoint)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting endpoint: %w", err)
+ }
+ ret := params.Repository{
+ ID: repo.ID.String(),
+ Name: repo.Name,
+ Owner: repo.Owner,
+ CredentialsName: repo.Credentials.Name,
+ Pools: make([]params.Pool, len(repo.Pools)),
+ WebhookSecret: string(secret),
+ PoolBalancerType: repo.PoolBalancerType,
+ CreatedAt: repo.CreatedAt,
+ UpdatedAt: repo.UpdatedAt,
+ Endpoint: endpoint,
}
- ret := params.Repository{
- ID: repo.ID.String(),
- Name: repo.Name,
- Owner: repo.Owner,
- CredentialsName: repo.CredentialsName,
- Pools: make([]params.Pool, len(repo.Pools)),
- WebhookSecret: secret,
+ if repo.CredentialsID != nil && repo.GiteaCredentialsID != nil {
+ return params.Repository{}, runnerErrors.NewConflictError("both gitea and github credentials are set for repo %s", repo.Name)
+ }
+
+ var forgeCreds params.ForgeCredentials
+ if repo.CredentialsID != nil {
+ ret.CredentialsID = *repo.CredentialsID
+ forgeCreds, err = s.sqlToCommonForgeCredentials(repo.Credentials)
+ }
+
+ if repo.GiteaCredentialsID != nil {
+ ret.CredentialsID = *repo.GiteaCredentialsID
+ forgeCreds, err = s.sqlGiteaToCommonForgeCredentials(repo.GiteaCredentials)
+ }
+
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting credentials: %w", err)
+ }
+
+ if len(repo.Events) > 0 {
+ ret.Events = make([]params.EntityEvent, len(repo.Events))
+ for idx, event := range repo.Events {
+ ret.Events[idx] = params.EntityEvent{
+ ID: event.ID,
+ Message: event.Message,
+ EventType: event.EventType,
+ EventLevel: event.EventLevel,
+ CreatedAt: event.CreatedAt,
+ }
+ }
+ }
+
+ if detailed {
+ ret.Credentials = forgeCreds
+ ret.CredentialsName = forgeCreds.Name
+ }
+
+ if ret.PoolBalancerType == "" {
+ ret.PoolBalancerType = params.PoolBalancerTypeRoundRobin
}
for idx, pool := range repo.Pools {
- ret.Pools[idx] = s.sqlToCommonPool(pool)
+ ret.Pools[idx], err = s.sqlToCommonPool(pool)
+ if err != nil {
+ return params.Repository{}, fmt.Errorf("error converting pool: %w", err)
+ }
}
return ret, nil
@@ -210,39 +479,39 @@ func (s *sqlDatabase) sqlToCommonRepository(repo Repository) (params.Repository,
func (s *sqlDatabase) sqlToParamsUser(user User) params.User {
return params.User{
- ID: user.ID.String(),
- CreatedAt: user.CreatedAt,
- UpdatedAt: user.UpdatedAt,
- Email: user.Email,
- Username: user.Username,
- FullName: user.FullName,
- Password: user.Password,
- Enabled: user.Enabled,
- IsAdmin: user.IsAdmin,
+ ID: user.ID.String(),
+ CreatedAt: user.CreatedAt,
+ UpdatedAt: user.UpdatedAt,
+ Email: user.Email,
+ Username: user.Username,
+ FullName: user.FullName,
+ Password: user.Password,
+ Enabled: user.Enabled,
+ IsAdmin: user.IsAdmin,
+ Generation: user.Generation,
}
}
-func (s *sqlDatabase) getOrCreateTag(tagName string) (Tag, error) {
+func (s *sqlDatabase) getOrCreateTag(tx *gorm.DB, tagName string) (Tag, error) {
var tag Tag
- q := s.conn.Where("name = ?", tagName).First(&tag)
+ q := tx.Where("name = ? COLLATE NOCASE", tagName).First(&tag)
if q.Error == nil {
return tag, nil
}
if !errors.Is(q.Error, gorm.ErrRecordNotFound) {
- return Tag{}, errors.Wrap(q.Error, "fetching tag from database")
+ return Tag{}, fmt.Errorf("error fetching tag from database: %w", q.Error)
}
newTag := Tag{
Name: tagName,
}
- q = s.conn.Create(&newTag)
- if q.Error != nil {
- return Tag{}, errors.Wrap(q.Error, "creating tag")
+ if err := tx.Create(&newTag).Error; err != nil {
+ return Tag{}, fmt.Errorf("error creating tag: %w", err)
}
return newTag, nil
}
-func (s *sqlDatabase) updatePool(pool Pool, param params.UpdatePoolParams) (params.Pool, error) {
+func (s *sqlDatabase) updatePool(tx *gorm.DB, pool Pool, param params.UpdatePoolParams) (params.Pool, error) {
if param.Enabled != nil && pool.Enabled != *param.Enabled {
pool.Enabled = *param.Enabled
}
@@ -287,24 +556,410 @@ func (s *sqlDatabase) updatePool(pool Pool, param params.UpdatePoolParams) (para
pool.GitHubRunnerGroup = *param.GitHubRunnerGroup
}
- if q := s.conn.Save(&pool); q.Error != nil {
- return params.Pool{}, errors.Wrap(q.Error, "saving database entry")
+ if param.Priority != nil {
+ pool.Priority = *param.Priority
+ }
+
+ if q := tx.Save(&pool); q.Error != nil {
+ return params.Pool{}, fmt.Errorf("error saving database entry: %w", q.Error)
}
tags := []Tag{}
- if param.Tags != nil && len(param.Tags) > 0 {
+ if len(param.Tags) > 0 {
for _, val := range param.Tags {
- t, err := s.getOrCreateTag(val)
+ t, err := s.getOrCreateTag(tx, val)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching tag")
+ return params.Pool{}, fmt.Errorf("error fetching tag: %w", err)
}
tags = append(tags, t)
}
- if err := s.conn.Model(&pool).Association("Tags").Replace(&tags); err != nil {
- return params.Pool{}, errors.Wrap(err, "replacing tags")
+ if err := tx.Model(&pool).Association("Tags").Replace(&tags); err != nil {
+ return params.Pool{}, fmt.Errorf("error replacing tags: %w", err)
}
}
- return s.sqlToCommonPool(pool), nil
+ return s.sqlToCommonPool(pool)
+}
+
+// getPoolByID fetches a single Pool row by its UUID, optionally preloading
+// the given associations. It returns a wrapped runnerErrors.ErrBadRequest
+// for a malformed ID and runnerErrors.ErrNotFound when no pool exists.
+func (s *sqlDatabase) getPoolByID(tx *gorm.DB, poolID string, preload ...string) (Pool, error) {
+	u, err := uuid.Parse(poolID)
+	if err != nil {
+		return Pool{}, fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+	}
+	q := tx.Model(&Pool{})
+	// Ranging over an empty slice is a no-op; no len() guard needed.
+	for _, item := range preload {
+		q = q.Preload(item)
+	}
+
+	var pool Pool
+	q = q.Where("id = ?", u).First(&pool)
+	if q.Error != nil {
+		if errors.Is(q.Error, gorm.ErrRecordNotFound) {
+			return Pool{}, runnerErrors.ErrNotFound
+		}
+		// Fixed: the previous message said "fetching org" even though this
+		// helper fetches a pool.
+		return Pool{}, fmt.Errorf("error fetching pool from database: %w", q.Error)
+	}
+	return pool, nil
+}
+
+// getScaleSetByID fetches one ScaleSet row by primary key, optionally
+// preloading the supplied associations. It returns
+// runnerErrors.ErrNotFound when no such scale set exists.
+func (s *sqlDatabase) getScaleSetByID(tx *gorm.DB, scaleSetID uint, preload ...string) (ScaleSet, error) {
+	query := tx.Model(&ScaleSet{})
+	for _, association := range preload {
+		query = query.Preload(association)
+	}
+
+	var scaleSet ScaleSet
+	if err := query.Where("id = ?", scaleSetID).First(&scaleSet).Error; err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			return ScaleSet{}, runnerErrors.ErrNotFound
+		}
+		return ScaleSet{}, fmt.Errorf("error fetching scale set from database: %w", err)
+	}
+	return scaleSet, nil
+}
+
+// hasGithubEntity checks that a forge entity (repository, organization or
+// enterprise) with the given ID exists. It returns a wrapped
+// runnerErrors.ErrBadRequest for malformed IDs or unknown entity types,
+// runnerErrors.ErrNotFound when no row matches, and nil on success.
+func (s *sqlDatabase) hasGithubEntity(tx *gorm.DB, entityType params.ForgeEntityType, entityID string) error {
+	u, err := uuid.Parse(entityID)
+	if err != nil {
+		return fmt.Errorf("error parsing id: %w", runnerErrors.ErrBadRequest)
+	}
+	var q *gorm.DB
+	switch entityType {
+	case params.ForgeEntityTypeRepository:
+		q = tx.Model(&Repository{}).Where("id = ?", u)
+	case params.ForgeEntityTypeOrganization:
+		q = tx.Model(&Organization{}).Where("id = ?", u)
+	case params.ForgeEntityTypeEnterprise:
+		q = tx.Model(&Enterprise{}).Where("id = ?", u)
+	default:
+		return fmt.Errorf("error invalid entity type: %w", runnerErrors.ErrBadRequest)
+	}
+
+	// Fixed: the previous implementation called q.First(entity) with a nil
+	// interface{}, which gorm cannot reflect into. An existence check via
+	// COUNT avoids materializing the row entirely.
+	var count int64
+	if err := q.Count(&count).Error; err != nil {
+		return fmt.Errorf("error fetching entity from database: %w", err)
+	}
+	if count == 0 {
+		return fmt.Errorf("error entity not found: %w", runnerErrors.ErrNotFound)
+	}
+	return nil
+}
+
+// marshalAndSeal JSON-encodes data and encrypts the encoded bytes with the
+// configured database passphrase.
+func (s *sqlDatabase) marshalAndSeal(data interface{}) ([]byte, error) {
+	asJSON, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling data: %w", err)
+	}
+	return util.Seal(asJSON, []byte(s.cfg.Passphrase))
+}
+
+// unsealAndUnmarshal decrypts data with the configured passphrase and then
+// JSON-decodes the plaintext into target.
+func (s *sqlDatabase) unsealAndUnmarshal(data []byte, target interface{}) error {
+	plaintext, err := util.Unseal(data, []byte(s.cfg.Passphrase))
+	if err != nil {
+		return fmt.Errorf("error decrypting data: %w", err)
+	}
+	if err := json.Unmarshal(plaintext, target); err != nil {
+		return fmt.Errorf("error unmarshalling data: %w", err)
+	}
+	return nil
+}
+
+// sendNotify publishes a change notification for the given entity type and
+// operation to the registered watcher producer. It is a no-op when no
+// producer was registered; a nil payload is rejected.
+func (s *sqlDatabase) sendNotify(entityType dbCommon.DatabaseEntityType, op dbCommon.OperationType, payload interface{}) error {
+	if s.producer == nil {
+		// Nothing is listening for change events; silently skip.
+		return nil
+	}
+	if payload == nil {
+		return errors.New("missing payload")
+	}
+	return s.producer.Notify(dbCommon.ChangePayload{
+		Operation:  op,
+		Payload:    payload,
+		EntityType: entityType,
+	})
+}
+
+// GetForgeEntity fetches the repository, organization or enterprise
+// identified by entityID and converts it into the generic
+// params.ForgeEntity representation.
+//
+// NOTE(review): the incoming context parameter is ignored and s.ctx is
+// used for the lookups instead — confirm this is intentional.
+func (s *sqlDatabase) GetForgeEntity(_ context.Context, entityType params.ForgeEntityType, entityID string) (params.ForgeEntity, error) {
+	var ghEntity params.EntityGetter
+	var err error
+	switch entityType {
+	case params.ForgeEntityTypeEnterprise:
+		ghEntity, err = s.GetEnterpriseByID(s.ctx, entityID)
+	case params.ForgeEntityTypeOrganization:
+		ghEntity, err = s.GetOrganizationByID(s.ctx, entityID)
+	case params.ForgeEntityTypeRepository:
+		ghEntity, err = s.GetRepositoryByID(s.ctx, entityID)
+	default:
+		return params.ForgeEntity{}, fmt.Errorf("error invalid entity type: %w", runnerErrors.ErrBadRequest)
+	}
+	if err != nil {
+		return params.ForgeEntity{}, fmt.Errorf("error failed to get entity from db: %w", err)
+	}
+
+	// Convert the DB-specific model to the generic forge entity.
+	entity, err := ghEntity.GetEntity()
+	if err != nil {
+		return params.ForgeEntity{}, fmt.Errorf("error failed to get entity: %w", err)
+	}
+	return entity, nil
+}
+
+// addRepositoryEvent appends an event to the repository identified by
+// repoID and, when maxEvents > 0, permanently prunes the stored event
+// history so that at most maxEvents entries remain.
+func (s *sqlDatabase) addRepositoryEvent(ctx context.Context, repoID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+	repo, err := s.getRepoByID(ctx, s.conn, repoID)
+	if err != nil {
+		// Fixed: the previous message said "error updating instance",
+		// which did not describe this repository lookup.
+		return fmt.Errorf("error fetching repository: %w", err)
+	}
+
+	msg := RepositoryEvent{
+		Message:    statusMessage,
+		EventType:  event,
+		EventLevel: eventLevel,
+	}
+
+	if err := s.conn.Model(&repo).Association("Events").Append(&msg); err != nil {
+		return fmt.Errorf("error adding status message: %w", err)
+	}
+
+	if maxEvents > 0 {
+		// Fetch the newest maxEvents events; anything older than the last
+		// one in this window is hard-deleted (Unscoped).
+		var latestEvents []RepositoryEvent
+		q := s.conn.Model(&RepositoryEvent{}).
+			Limit(maxEvents).Order("id desc").
+			Where("repo_id = ?", repo.ID).Find(&latestEvents)
+		if q.Error != nil {
+			return fmt.Errorf("error fetching latest events: %w", q.Error)
+		}
+		if len(latestEvents) == maxEvents {
+			lastInList := latestEvents[len(latestEvents)-1]
+			if err := s.conn.Where("repo_id = ? and id < ?", repo.ID, lastInList.ID).Unscoped().Delete(&RepositoryEvent{}).Error; err != nil {
+				return fmt.Errorf("error deleting old events: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+// addOrgEvent appends an event to the organization identified by orgID
+// and, when maxEvents > 0, permanently prunes the stored event history so
+// that at most maxEvents entries remain.
+func (s *sqlDatabase) addOrgEvent(ctx context.Context, orgID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+	org, err := s.getOrgByID(ctx, s.conn, orgID)
+	if err != nil {
+		// Fixed: the previous message said "error updating instance",
+		// which did not describe this organization lookup.
+		return fmt.Errorf("error fetching organization: %w", err)
+	}
+
+	msg := OrganizationEvent{
+		Message:    statusMessage,
+		EventType:  event,
+		EventLevel: eventLevel,
+	}
+
+	if err := s.conn.Model(&org).Association("Events").Append(&msg); err != nil {
+		return fmt.Errorf("error adding status message: %w", err)
+	}
+
+	if maxEvents > 0 {
+		// Fetch the newest maxEvents events; anything older than the last
+		// one in this window is hard-deleted (Unscoped).
+		var latestEvents []OrganizationEvent
+		q := s.conn.Model(&OrganizationEvent{}).
+			Limit(maxEvents).Order("id desc").
+			Where("org_id = ?", org.ID).Find(&latestEvents)
+		if q.Error != nil {
+			return fmt.Errorf("error fetching latest events: %w", q.Error)
+		}
+		if len(latestEvents) == maxEvents {
+			lastInList := latestEvents[len(latestEvents)-1]
+			if err := s.conn.Where("org_id = ? and id < ?", org.ID, lastInList.ID).Unscoped().Delete(&OrganizationEvent{}).Error; err != nil {
+				return fmt.Errorf("error deleting old events: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+// addEnterpriseEvent appends an event to the enterprise identified by
+// entID and, when maxEvents > 0, permanently prunes the stored event
+// history so that at most maxEvents entries remain.
+func (s *sqlDatabase) addEnterpriseEvent(ctx context.Context, entID string, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+	ent, err := s.getEnterpriseByID(ctx, s.conn, entID)
+	if err != nil {
+		// Fixed: the previous message said "error updating instance",
+		// which did not describe this enterprise lookup.
+		return fmt.Errorf("error fetching enterprise: %w", err)
+	}
+
+	msg := EnterpriseEvent{
+		Message:    statusMessage,
+		EventType:  event,
+		EventLevel: eventLevel,
+	}
+
+	if err := s.conn.Model(&ent).Association("Events").Append(&msg); err != nil {
+		return fmt.Errorf("error adding status message: %w", err)
+	}
+
+	if maxEvents > 0 {
+		// Fetch the newest maxEvents events; anything older than the last
+		// one in this window is hard-deleted (Unscoped).
+		var latestEvents []EnterpriseEvent
+		q := s.conn.Model(&EnterpriseEvent{}).
+			Limit(maxEvents).Order("id desc").
+			Where("enterprise_id = ?", ent.ID).Find(&latestEvents)
+		if q.Error != nil {
+			return fmt.Errorf("error fetching latest events: %w", q.Error)
+		}
+		if len(latestEvents) == maxEvents {
+			lastInList := latestEvents[len(latestEvents)-1]
+			if err := s.conn.Where("enterprise_id = ? and id < ?", ent.ID, lastInList.ID).Unscoped().Delete(&EnterpriseEvent{}).Error; err != nil {
+				return fmt.Errorf("error deleting old events: %w", err)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (s *sqlDatabase) AddEntityEvent(ctx context.Context, entity params.ForgeEntity, event params.EventType, eventLevel params.EventLevel, statusMessage string, maxEvents int) error {
+ if maxEvents == 0 {
+ return fmt.Errorf("max events cannot be 0: %w", runnerErrors.ErrBadRequest)
+ }
+
+ switch entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ return s.addRepositoryEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ case params.ForgeEntityTypeOrganization:
+ return s.addOrgEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ case params.ForgeEntityTypeEnterprise:
+ return s.addEnterpriseEvent(ctx, entity.ID, event, eventLevel, statusMessage, maxEvents)
+ default:
+ return fmt.Errorf("invalid entity type: %w", runnerErrors.ErrBadRequest)
+ }
+}
+
+// sqlToCommonForgeCredentials converts a GithubCredentials database model
+// into the generic params.ForgeCredentials representation. The sealed
+// credentials payload is decrypted with the configured passphrase, and any
+// preloaded repositories, organizations and enterprises are converted in
+// non-detailed form (presumably to avoid recursing back into credentials —
+// confirm).
+func (s *sqlDatabase) sqlToCommonForgeCredentials(creds GithubCredentials) (params.ForgeCredentials, error) {
+	if len(creds.Payload) == 0 {
+		return params.ForgeCredentials{}, errors.New("empty credentials payload")
+	}
+	data, err := util.Unseal(creds.Payload, []byte(s.cfg.Passphrase))
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error unsealing credentials: %w", err)
+	}
+
+	ep, err := s.sqlToCommonGithubEndpoint(creds.Endpoint)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error converting github endpoint: %w", err)
+	}
+
+	commonCreds := params.ForgeCredentials{
+		ID:          creds.ID,
+		Name:        creds.Name,
+		Description: creds.Description,
+		// Endpoint URLs are duplicated as flat fields for convenience.
+		APIBaseURL:         creds.Endpoint.APIBaseURL,
+		BaseURL:            creds.Endpoint.BaseURL,
+		UploadBaseURL:      creds.Endpoint.UploadBaseURL,
+		CABundle:           creds.Endpoint.CACertBundle,
+		AuthType:           creds.AuthType,
+		CreatedAt:          creds.CreatedAt,
+		UpdatedAt:          creds.UpdatedAt,
+		ForgeType:          creds.Endpoint.EndpointType,
+		Endpoint:           ep,
+		CredentialsPayload: data,
+	}
+
+	for _, repo := range creds.Repositories {
+		commonRepo, err := s.sqlToCommonRepository(repo, false)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error converting github repository: %w", err)
+		}
+		commonCreds.Repositories = append(commonCreds.Repositories, commonRepo)
+	}
+
+	for _, org := range creds.Organizations {
+		commonOrg, err := s.sqlToCommonOrganization(org, false)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error converting github organization: %w", err)
+		}
+		commonCreds.Organizations = append(commonCreds.Organizations, commonOrg)
+	}
+
+	for _, ent := range creds.Enterprises {
+		commonEnt, err := s.sqlToCommonEnterprise(ent, false)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error converting github enterprise %s: %w", ent.Name, err)
+		}
+		commonCreds.Enterprises = append(commonCreds.Enterprises, commonEnt)
+	}
+
+	return commonCreds, nil
+}
+
+// sqlGiteaToCommonForgeCredentials converts a GiteaCredentials database
+// model into the generic params.ForgeCredentials representation. The
+// sealed credentials payload is decrypted with the configured passphrase,
+// and any preloaded repositories and organizations are converted in
+// non-detailed form.
+func (s *sqlDatabase) sqlGiteaToCommonForgeCredentials(creds GiteaCredentials) (params.ForgeCredentials, error) {
+	if len(creds.Payload) == 0 {
+		return params.ForgeCredentials{}, errors.New("empty credentials payload")
+	}
+	data, err := util.Unseal(creds.Payload, []byte(s.cfg.Passphrase))
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error unsealing credentials: %w", err)
+	}
+
+	ep, err := s.sqlToCommonGithubEndpoint(creds.Endpoint)
+	if err != nil {
+		return params.ForgeCredentials{}, fmt.Errorf("error converting gitea endpoint: %w", err)
+	}
+
+	// NOTE(review): unlike the GitHub converter, no UploadBaseURL is set
+	// here — presumably Gitea endpoints have no separate upload URL;
+	// confirm.
+	commonCreds := params.ForgeCredentials{
+		ID:                 creds.ID,
+		Name:               creds.Name,
+		Description:        creds.Description,
+		APIBaseURL:         creds.Endpoint.APIBaseURL,
+		BaseURL:            creds.Endpoint.BaseURL,
+		CABundle:           creds.Endpoint.CACertBundle,
+		AuthType:           creds.AuthType,
+		CreatedAt:          creds.CreatedAt,
+		UpdatedAt:          creds.UpdatedAt,
+		ForgeType:          creds.Endpoint.EndpointType,
+		Endpoint:           ep,
+		CredentialsPayload: data,
+	}
+
+	for _, repo := range creds.Repositories {
+		commonRepo, err := s.sqlToCommonRepository(repo, false)
+		if err != nil {
+			// Fixed: these messages previously said "github" even though
+			// the entities belong to gitea credentials.
+			return params.ForgeCredentials{}, fmt.Errorf("error converting gitea repository: %w", err)
+		}
+		commonCreds.Repositories = append(commonCreds.Repositories, commonRepo)
+	}
+
+	for _, org := range creds.Organizations {
+		commonOrg, err := s.sqlToCommonOrganization(org, false)
+		if err != nil {
+			return params.ForgeCredentials{}, fmt.Errorf("error converting gitea organization: %w", err)
+		}
+		commonCreds.Organizations = append(commonCreds.Organizations, commonOrg)
+	}
+
+	return commonCreds, nil
+}
+
+// sqlToCommonGithubEndpoint maps a GithubEndpoint database model onto the
+// public params.ForgeEndpoint representation. It never fails; the error
+// return is kept for symmetry with the other converters.
+func (s *sqlDatabase) sqlToCommonGithubEndpoint(ep GithubEndpoint) (params.ForgeEndpoint, error) {
+	converted := params.ForgeEndpoint{
+		Name:          ep.Name,
+		Description:   ep.Description,
+		EndpointType:  ep.EndpointType,
+		BaseURL:       ep.BaseURL,
+		APIBaseURL:    ep.APIBaseURL,
+		UploadBaseURL: ep.UploadBaseURL,
+		CACertBundle:  ep.CACertBundle,
+		CreatedAt:     ep.CreatedAt,
+		UpdatedAt:     ep.UpdatedAt,
+	}
+	return converted, nil
+}
+
+// getUIDFromContext extracts the authenticated user ID from the request
+// context and parses it into a uuid.UUID. It returns a wrapped
+// runnerErrors.ErrUnauthorized when the context carries no user ID or when
+// that ID is not a valid UUID.
+func getUIDFromContext(ctx context.Context) (uuid.UUID, error) {
+	rawID := auth.UserID(ctx)
+	if rawID == "" {
+		return uuid.Nil, fmt.Errorf("error getting UID from context: %w", runnerErrors.ErrUnauthorized)
+	}
+
+	parsed, err := uuid.Parse(rawID)
+	if err != nil {
+		return uuid.Nil, fmt.Errorf("error parsing UID from context: %w", runnerErrors.ErrUnauthorized)
+	}
+	return parsed, nil
+}
diff --git a/database/watcher/consumer.go b/database/watcher/consumer.go
new file mode 100644
index 00000000..ed0967e9
--- /dev/null
+++ b/database/watcher/consumer.go
@@ -0,0 +1,98 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "log/slog"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+// consumer receives database change payloads from the watcher and exposes
+// the ones matching its filters on a channel.
+type consumer struct {
+	// messages delivers matching change payloads to the subscriber.
+	messages chan common.ChangePayload
+	// filters restricts which payloads are forwarded; empty means all.
+	filters []common.PayloadFilterFunc
+	// id identifies this consumer.
+	id string
+
+	// mux guards closed, filters and channel teardown.
+	mux sync.Mutex
+	// closed marks the consumer as shut down.
+	closed bool
+	// quit is closed by Close() to unblock a pending Send.
+	quit chan struct{}
+	ctx  context.Context
+}
+
+// SetFilters replaces the consumer's payload filters. Subsequent payloads
+// are delivered only if every filter accepts them.
+func (w *consumer) SetFilters(filters ...common.PayloadFilterFunc) {
+	w.mux.Lock()
+	defer w.mux.Unlock()
+	w.filters = filters
+}
+
+// Watch returns the receive-only channel on which matching change payloads
+// are delivered. The channel is closed by Close().
+func (w *consumer) Watch() <-chan common.ChangePayload {
+	return w.messages
+}
+
+// Close marks the consumer as closed and tears down its channels,
+// unblocking any pending Send. It is idempotent: calling it again is a
+// no-op.
+func (w *consumer) Close() {
+	w.mux.Lock()
+	defer w.mux.Unlock()
+
+	if w.closed {
+		return
+	}
+	w.closed = true
+	close(w.messages)
+	close(w.quit)
+}
+
+// IsClosed reports whether Close() has been called on this consumer.
+func (w *consumer) IsClosed() bool {
+	w.mux.Lock()
+	defer w.mux.Unlock()
+	return w.closed
+}
+
+// Send delivers a change payload to this consumer's Watch channel. The
+// payload is dropped when the consumer is closed, when any configured
+// filter rejects it, or when the subscriber does not receive it within one
+// second.
+//
+// NOTE(review): the mutex is held across the blocking select below, so a
+// slow subscriber also blocks Close() and IsClosed() for up to one second.
+func (w *consumer) Send(payload common.ChangePayload) {
+	w.mux.Lock()
+	defer w.mux.Unlock()
+
+	if w.closed {
+		return
+	}
+
+	// All filters must accept the payload (logical AND).
+	if len(w.filters) > 0 {
+		shouldSend := true
+		for _, filter := range w.filters {
+			if !filter(payload) {
+				shouldSend = false
+				break
+			}
+		}
+
+		if !shouldSend {
+			return
+		}
+	}
+
+	// Best-effort delivery: give the subscriber one second before dropping
+	// the payload; also bail out on consumer close or context cancel.
+	timer := time.NewTimer(1 * time.Second)
+	defer timer.Stop()
+	slog.DebugContext(w.ctx, "sending payload")
+	select {
+	case <-w.quit:
+		slog.DebugContext(w.ctx, "consumer is closed")
+	case <-w.ctx.Done():
+		slog.DebugContext(w.ctx, "consumer is closed")
+	case <-timer.C:
+		slog.DebugContext(w.ctx, "timeout trying to send payload", "payload", payload)
+	case w.messages <- payload:
+	}
+}
diff --git a/database/watcher/filters.go b/database/watcher/filters.go
new file mode 100644
index 00000000..acf79ba8
--- /dev/null
+++ b/database/watcher/filters.go
@@ -0,0 +1,338 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+)
+
+// IDGetter is implemented by payload types that expose their database ID.
+// It lets WithEntityFilter compare IDs without knowing the concrete type.
+type IDGetter interface {
+    GetID() string
+}
+
+// WithAny returns a filter function that returns true if any of the provided filters return true.
+// This filter is useful if for example you want to watch for update operations on any of the supplied
+// entities.
+// Example:
+//
+//	// Watch for any update operation on repositories or organizations
+//	consumer.SetFilters(
+//	    watcher.WithOperationTypeFilter(common.UpdateOperation),
+//	    watcher.WithAny(
+//	        watcher.WithEntityTypeFilter(common.RepositoryEntityType),
+//	        watcher.WithEntityTypeFilter(common.OrganizationEntityType),
+//	    ))
+func WithAny(filters ...dbCommon.PayloadFilterFunc) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        // Logical OR with short-circuit on the first match.
+        for idx := range filters {
+            if filters[idx](payload) {
+                return true
+            }
+        }
+        return false
+    }
+}
+
+// WithAll returns a filter function that returns true only when every one of
+// the provided filters returns true (logical AND, short-circuiting).
+func WithAll(filters ...dbCommon.PayloadFilterFunc) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        for idx := range filters {
+            if ok := filters[idx](payload); !ok {
+                return false
+            }
+        }
+        return true
+    }
+}
+
+// WithEntityTypeFilter returns a filter function that matches change payloads
+// whose entity type equals the supplied entity type.
+func WithEntityTypeFilter(entityType dbCommon.DatabaseEntityType) dbCommon.PayloadFilterFunc {
+    return func(change dbCommon.ChangePayload) bool {
+        return change.EntityType == entityType
+    }
+}
+
+// WithOperationTypeFilter returns a filter function that matches change
+// payloads whose operation equals the supplied operation type.
+func WithOperationTypeFilter(operationType dbCommon.OperationType) dbCommon.PayloadFilterFunc {
+    return func(change dbCommon.ChangePayload) bool {
+        return change.Operation == operationType
+    }
+}
+
+// WithEntityPoolFilter returns true if the change payload is a pool that belongs to the
+// supplied Github entity. This is useful when an entity worker wants to watch for changes
+// in pools that belong to it.
+func WithEntityPoolFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if payload.EntityType != dbCommon.PoolEntityType {
+            return false
+        }
+        pool, ok := payload.Payload.(params.Pool)
+        if !ok {
+            return false
+        }
+        // Match the pool's owner field corresponding to the entity's type.
+        switch ghEntity.EntityType {
+        case params.ForgeEntityTypeRepository:
+            return pool.RepoID == ghEntity.ID
+        case params.ForgeEntityTypeOrganization:
+            return pool.OrgID == ghEntity.ID
+        case params.ForgeEntityTypeEnterprise:
+            return pool.EnterpriseID == ghEntity.ID
+        }
+        return false
+    }
+}
+
+// WithEntityScaleSetFilter returns true if the change payload is a scale set that belongs
+// to the supplied Github entity.
+func WithEntityScaleSetFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        forgeType, err := ghEntity.GetForgeType()
+        if err != nil {
+            return false
+        }
+
+        // Gitea does not have scale sets.
+        if forgeType == params.GiteaEndpointType {
+            return false
+        }
+
+        if payload.EntityType != dbCommon.ScaleSetEntityType {
+            return false
+        }
+        scaleSet, ok := payload.Payload.(params.ScaleSet)
+        if !ok {
+            return false
+        }
+        // Match the scale set's owner field corresponding to the entity's type.
+        switch ghEntity.EntityType {
+        case params.ForgeEntityTypeRepository:
+            return scaleSet.RepoID == ghEntity.ID
+        case params.ForgeEntityTypeOrganization:
+            return scaleSet.OrgID == ghEntity.ID
+        case params.ForgeEntityTypeEnterprise:
+            return scaleSet.EnterpriseID == ghEntity.ID
+        }
+        return false
+    }
+}
+
+// WithEntityFilter returns a filter function that filters payloads by entity.
+// Change payloads that match the entity type and ID will return true.
+func WithEntityFilter(entity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        // Fast path: the database entity type string must match the forge
+        // entity type string before we bother with type assertions.
+        if params.ForgeEntityType(payload.EntityType) != entity.EntityType {
+            return false
+        }
+        var ent IDGetter
+        var ok bool
+        // Assert the concrete payload type; each case also re-checks the
+        // entity type to guard against mismatched type/payload combinations.
+        switch payload.EntityType {
+        case dbCommon.RepositoryEntityType:
+            if entity.EntityType != params.ForgeEntityTypeRepository {
+                return false
+            }
+            ent, ok = payload.Payload.(params.Repository)
+        case dbCommon.OrganizationEntityType:
+            if entity.EntityType != params.ForgeEntityTypeOrganization {
+                return false
+            }
+            ent, ok = payload.Payload.(params.Organization)
+        case dbCommon.EnterpriseEntityType:
+            if entity.EntityType != params.ForgeEntityTypeEnterprise {
+                return false
+            }
+            ent, ok = payload.Payload.(params.Enterprise)
+        default:
+            return false
+        }
+        if !ok {
+            return false
+        }
+        return ent.GetID() == entity.ID
+    }
+}
+
+// WithEntityJobFilter returns a filter function that matches job payloads
+// belonging to the supplied forge entity.
+// NOTE(review): a job whose corresponding ID field is nil also matches —
+// presumably to include jobs not yet associated with an entity; confirm
+// against callers.
+func WithEntityJobFilter(ghEntity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        switch payload.EntityType {
+        case dbCommon.JobEntityType:
+            job, ok := payload.Payload.(params.Job)
+            if !ok {
+                return false
+            }
+
+            // Reject only when the job carries a non-nil ID that differs
+            // from the entity's ID.
+            switch ghEntity.EntityType {
+            case params.ForgeEntityTypeRepository:
+                if job.RepoID != nil && job.RepoID.String() != ghEntity.ID {
+                    return false
+                }
+            case params.ForgeEntityTypeOrganization:
+                if job.OrgID != nil && job.OrgID.String() != ghEntity.ID {
+                    return false
+                }
+            case params.ForgeEntityTypeEnterprise:
+                if job.EnterpriseID != nil && job.EnterpriseID.String() != ghEntity.ID {
+                    return false
+                }
+            default:
+                return false
+            }
+
+            return true
+        default:
+            return false
+        }
+    }
+}
+
+// WithForgeCredentialsFilter returns a filter function that filters payloads by Github or Gitea credentials.
+func WithForgeCredentialsFilter(creds params.ForgeCredentials) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        var forgeCreds params.ForgeCredentials
+        var ok bool
+        // Both Github and Gitea credential payloads use the same
+        // params.ForgeCredentials type.
+        switch payload.EntityType {
+        case dbCommon.GithubCredentialsEntityType, dbCommon.GiteaCredentialsEntityType:
+            forgeCreds, ok = payload.Payload.(params.ForgeCredentials)
+        default:
+            return false
+        }
+        if !ok {
+            return false
+        }
+        // Gitea and Github creds have different models. The ID is uint, so we
+        // need to explicitly check their type, or risk a clash.
+        if forgeCreds.ForgeType != creds.ForgeType {
+            return false
+        }
+        return forgeCreds.GetID() == creds.GetID()
+    }
+}
+
+// WithUserIDFilter returns a filter function that matches user payloads whose
+// ID equals the supplied user ID.
+func WithUserIDFilter(userID string) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if payload.EntityType != dbCommon.UserEntityType {
+            return false
+        }
+        user, ok := payload.Payload.(params.User)
+        return ok && user.ID == userID
+    }
+}
+
+// WithNone returns a filter function that rejects every payload.
+func WithNone() dbCommon.PayloadFilterFunc {
+    return func(dbCommon.ChangePayload) bool {
+        return false
+    }
+}
+
+// WithEverything returns a filter function that accepts every payload.
+func WithEverything() dbCommon.PayloadFilterFunc {
+    return func(dbCommon.ChangePayload) bool {
+        return true
+    }
+}
+
+// WithExcludeEntityTypeFilter returns a filter function that rejects payloads
+// of the supplied entity type and accepts everything else.
+func WithExcludeEntityTypeFilter(entityType dbCommon.DatabaseEntityType) dbCommon.PayloadFilterFunc {
+    return func(change dbCommon.ChangePayload) bool {
+        return change.EntityType != entityType
+    }
+}
+
+// WithScaleSetFilter returns a filter function that matches one particular
+// scale set by ID.
+func WithScaleSetFilter(scaleset params.ScaleSet) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if payload.EntityType != dbCommon.ScaleSetEntityType {
+            return false
+        }
+        candidate, ok := payload.Payload.(params.ScaleSet)
+        return ok && candidate.ID == scaleset.ID
+    }
+}
+
+// WithScaleSetInstanceFilter returns a filter function that matches instance
+// payloads belonging to the supplied scale set. Instances with a zero
+// ScaleSetID (i.e. pool-backed instances) never match.
+func WithScaleSetInstanceFilter(scaleset params.ScaleSet) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if payload.EntityType != dbCommon.InstanceEntityType {
+            return false
+        }
+
+        instance, ok := payload.Payload.(params.Instance)
+        if !ok || instance.ScaleSetID == 0 {
+            return false
+        }
+
+        return instance.ScaleSetID == scaleset.ID
+    }
+}
+
+// EntityTypeCallbackFilter is a callback function that takes a ChangePayload and returns a boolean.
+// This callback type is used in the WithEntityTypeAndCallbackFilter (and potentially others) when
+// a filter needs to delegate logic to a specific callback function.
+type EntityTypeCallbackFilter func(payload dbCommon.ChangePayload) (bool, error)
+
+// WithEntityTypeAndCallbackFilter returns a filter function that filters payloads by entity type and the
+// result of a callback function. An error from the callback is treated as a
+// non-match; the error itself is discarded.
+func WithEntityTypeAndCallbackFilter(entityType dbCommon.DatabaseEntityType, callback EntityTypeCallbackFilter) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if payload.EntityType != entityType {
+            return false
+        }
+
+        ok, err := callback(payload)
+        if err != nil {
+            return false
+        }
+        return ok
+    }
+}
+
+// WithInstanceStatusFilter returns a filter function that matches instance
+// payloads whose status is any of the supplied statuses. With no statuses
+// given, nothing matches.
+func WithInstanceStatusFilter(statuses ...commonParams.InstanceStatus) dbCommon.PayloadFilterFunc {
+    return func(payload dbCommon.ChangePayload) bool {
+        if len(statuses) == 0 || payload.EntityType != dbCommon.InstanceEntityType {
+            return false
+        }
+        instance, ok := payload.Payload.(params.Instance)
+        if !ok {
+            return false
+        }
+        for idx := range statuses {
+            if statuses[idx] == instance.Status {
+                return true
+            }
+        }
+        return false
+    }
+}
diff --git a/database/watcher/producer.go b/database/watcher/producer.go
new file mode 100644
index 00000000..927aada0
--- /dev/null
+++ b/database/watcher/producer.go
@@ -0,0 +1,72 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+// producer is the sending end of the watcher. Payloads pushed via Notify are
+// picked up by the watcher's serviceProducer goroutine and fanned out to
+// consumers.
+type producer struct {
+    // closed is set once Close has run; Notify refuses further sends.
+    closed bool
+    // mux guards closed and the channel close in Close/Notify.
+    mux sync.Mutex
+    // id uniquely identifies this producer within the watcher.
+    id string
+
+    messages chan common.ChangePayload
+    quit chan struct{}
+    ctx context.Context
+}
+
+// Notify pushes payload to the watcher. It returns common.ErrProducerClosed
+// when the producer (or its context) is already shut down, and
+// common.ErrProducerTimeoutErr when the watcher does not accept the payload
+// within one second.
+// NOTE(review): the mutex is held for the duration of the send, so Close()
+// and concurrent Notify() calls can be delayed by up to one second each.
+func (w *producer) Notify(payload common.ChangePayload) error {
+    w.mux.Lock()
+    defer w.mux.Unlock()
+
+    if w.closed {
+        return common.ErrProducerClosed
+    }
+
+    // Bound the delivery attempt to one second.
+    timer := time.NewTimer(1 * time.Second)
+    defer timer.Stop()
+    select {
+    case <-w.quit:
+        return common.ErrProducerClosed
+    case <-w.ctx.Done():
+        return common.ErrProducerClosed
+    case <-timer.C:
+        return common.ErrProducerTimeoutErr
+    case w.messages <- payload:
+    }
+    return nil
+}
+
+// Close marks the producer as closed and closes its channels. It is
+// idempotent; subsequent calls are no-ops.
+func (p *producer) Close() {
+    p.mux.Lock()
+    defer p.mux.Unlock()
+    if p.closed {
+        return
+    }
+    p.closed = true
+    close(p.messages)
+    close(p.quit)
+}
+
+// IsClosed reports whether the producer has been closed.
+func (p *producer) IsClosed() bool {
+    p.mux.Lock()
+    defer p.mux.Unlock()
+    return p.closed
+}
diff --git a/database/watcher/test_export.go b/database/watcher/test_export.go
new file mode 100644
index 00000000..eb3d38b6
--- /dev/null
+++ b/database/watcher/test_export.go
@@ -0,0 +1,30 @@
+//go:build testing
+// +build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package watcher
+
+import "github.com/cloudbase/garm/database/common"
+
+// SetWatcher sets the watcher to be used by the database package.
+// This function is intended for use in tests only (the file is gated behind
+// the "testing" build tag).
+func SetWatcher(w common.Watcher) {
+    databaseWatcher = w
+}
+
+// GetWatcher returns the current package level watcher. Intended for use in
+// tests only; may return nil if InitWatcher has not been called.
+func GetWatcher() common.Watcher {
+    return databaseWatcher
+}
diff --git a/database/watcher/util_test.go b/database/watcher/util_test.go
new file mode 100644
index 00000000..82b94491
--- /dev/null
+++ b/database/watcher/util_test.go
@@ -0,0 +1,16 @@
+package watcher_test
+
+import (
+ "time"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+// waitForPayload blocks until a payload arrives on ch or timeout elapses.
+// It returns nil on timeout.
+func waitForPayload(ch <-chan common.ChangePayload, timeout time.Duration) *common.ChangePayload {
+    timer := time.NewTimer(timeout)
+    defer timer.Stop()
+    select {
+    case received := <-ch:
+        return &received
+    case <-timer.C:
+        return nil
+    }
+}
diff --git a/database/watcher/watcher.go b/database/watcher/watcher.go
new file mode 100644
index 00000000..804dec70
--- /dev/null
+++ b/database/watcher/watcher.go
@@ -0,0 +1,204 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "sync"
+
+ "github.com/cloudbase/garm/database/common"
+ garmUtil "github.com/cloudbase/garm/util"
+)
+
+var databaseWatcher common.Watcher
+
+// InitWatcher initializes the package level database watcher and starts its
+// main loop. It is a no-op when a watcher already exists.
+// NOTE(review): access to the databaseWatcher global is not synchronized;
+// concurrent InitWatcher calls could race — confirm callers initialize from
+// a single goroutine at startup.
+func InitWatcher(ctx context.Context) {
+    if databaseWatcher != nil {
+        return
+    }
+    ctx = garmUtil.WithSlogContext(ctx, slog.Any("watcher", "database"))
+    w := &watcher{
+        producers: make(map[string]*producer),
+        consumers: make(map[string]*consumer),
+        quit: make(chan struct{}),
+        ctx: ctx,
+    }
+
+    go w.loop()
+    databaseWatcher = w
+}
+
+// CloseWatcher shuts down the package level watcher (if any) and clears the
+// global reference. The watcher's own Close() also nils the global, so the
+// assignment here is defensive. Always returns nil.
+func CloseWatcher() error {
+    if databaseWatcher == nil {
+        return nil
+    }
+    databaseWatcher.Close()
+    databaseWatcher = nil
+    return nil
+}
+
+// RegisterProducer registers a new producer with the package level watcher,
+// tagging its context with the producer id for structured logging. Returns
+// common.ErrWatcherNotInitialized when InitWatcher has not been called.
+func RegisterProducer(ctx context.Context, id string) (common.Producer, error) {
+    if databaseWatcher == nil {
+        return nil, common.ErrWatcherNotInitialized
+    }
+    producerCtx := garmUtil.WithSlogContext(ctx, slog.Any("producer_id", id))
+    return databaseWatcher.RegisterProducer(producerCtx, id)
+}
+
+// RegisterConsumer registers a new consumer with the package level watcher,
+// tagging its context with the consumer id for structured logging. Returns
+// common.ErrWatcherNotInitialized when InitWatcher has not been called.
+func RegisterConsumer(ctx context.Context, id string, filters ...common.PayloadFilterFunc) (common.Consumer, error) {
+    if databaseWatcher == nil {
+        return nil, common.ErrWatcherNotInitialized
+    }
+    consumerCtx := garmUtil.WithSlogContext(ctx, slog.Any("consumer_id", id))
+    return databaseWatcher.RegisterConsumer(consumerCtx, id, filters...)
+}
+
+// watcher routes change payloads from registered producers to registered
+// consumers. Each producer and consumer gets its own service goroutine.
+type watcher struct {
+    // producers and consumers are keyed by their registration id.
+    producers map[string]*producer
+    consumers map[string]*consumer
+
+    // mux guards the maps and the closed flag.
+    mux sync.Mutex
+    closed bool
+    quit chan struct{}
+    ctx context.Context
+}
+
+// RegisterProducer creates a producer with the given id, starts a goroutine
+// to service it and returns it. Registering an id twice yields a wrapped
+// common.ErrProducerAlreadyRegistered.
+func (w *watcher) RegisterProducer(ctx context.Context, id string) (common.Producer, error) {
+    w.mux.Lock()
+    defer w.mux.Unlock()
+
+    if _, ok := w.producers[id]; ok {
+        return nil, fmt.Errorf("producer_id %s: %w", id, common.ErrProducerAlreadyRegistered)
+    }
+    p := &producer{
+        id: id,
+        // Buffer of one so Notify rarely blocks on a responsive watcher.
+        messages: make(chan common.ChangePayload, 1),
+        quit: make(chan struct{}),
+        ctx: ctx,
+    }
+    w.producers[id] = p
+    go w.serviceProducer(p)
+    return p, nil
+}
+
+// serviceProducer pumps payloads from a single producer to all registered
+// consumers until the watcher or the producer shuts down. On exit the
+// producer is closed (idempotently) and removed from the watcher's map.
+func (w *watcher) serviceProducer(prod *producer) {
+    defer func() {
+        w.mux.Lock()
+        defer w.mux.Unlock()
+        prod.Close()
+        // Fixed: this log line previously used the "consumer_id" attribute
+        // key for a producer id.
+        slog.InfoContext(w.ctx, "removing producer from watcher", "producer_id", prod.id)
+        delete(w.producers, prod.id)
+    }()
+    for {
+        select {
+        case <-w.quit:
+            slog.InfoContext(w.ctx, "shutting down watcher")
+            return
+        case <-w.ctx.Done():
+            slog.InfoContext(w.ctx, "shutting down watcher")
+            return
+        case <-prod.quit:
+            slog.InfoContext(w.ctx, "closing producer")
+            return
+        case <-prod.ctx.Done():
+            slog.InfoContext(w.ctx, "closing producer")
+            return
+        case payload := <-prod.messages:
+            // Fan out asynchronously; each consumer's Send applies its own
+            // filters and enforces its own delivery timeout.
+            w.mux.Lock()
+            for _, c := range w.consumers {
+                go c.Send(payload)
+            }
+            w.mux.Unlock()
+        }
+    }
+}
+
+// RegisterConsumer creates a consumer with the given id and filters, starts
+// a goroutine to service it and returns it. Registering an id twice yields a
+// wrapped common.ErrConsumerAlreadyRegistered (wrapped with the offending id
+// for consistency with RegisterProducer; errors.Is still matches).
+func (w *watcher) RegisterConsumer(ctx context.Context, id string, filters ...common.PayloadFilterFunc) (common.Consumer, error) {
+    w.mux.Lock()
+    defer w.mux.Unlock()
+    if _, ok := w.consumers[id]; ok {
+        return nil, fmt.Errorf("consumer_id %s: %w", id, common.ErrConsumerAlreadyRegistered)
+    }
+    c := &consumer{
+        // Buffer of one smooths delivery from the fan-out goroutines.
+        messages: make(chan common.ChangePayload, 1),
+        filters: filters,
+        quit: make(chan struct{}),
+        id: id,
+        ctx: ctx,
+    }
+    w.consumers[id] = c
+    go w.serviceConsumer(c)
+    return c, nil
+}
+
+// serviceConsumer waits for a shutdown signal (consumer quit/context or
+// watcher quit/context) and then deregisters the consumer. Actual payload
+// delivery happens via consumer.Send, called from serviceProducer's fan-out;
+// this goroutine only manages the consumer's lifecycle.
+func (w *watcher) serviceConsumer(consumer *consumer) {
+    defer func() {
+        w.mux.Lock()
+        defer w.mux.Unlock()
+        // Close is idempotent, so this is safe even when the consumer
+        // closed itself.
+        consumer.Close()
+        slog.InfoContext(w.ctx, "removing consumer from watcher", "consumer_id", consumer.id)
+        delete(w.consumers, consumer.id)
+    }()
+    slog.InfoContext(w.ctx, "starting consumer", "consumer_id", consumer.id)
+    for {
+        select {
+        case <-consumer.quit:
+            return
+        case <-consumer.ctx.Done():
+            return
+        case <-w.quit:
+            return
+        case <-w.ctx.Done():
+            return
+        }
+    }
+}
+
+// Close shuts down the watcher: it signals all service goroutines via the
+// quit channel, closes every registered producer and consumer (all Close
+// methods are idempotent, so the service goroutines' deferred Close calls
+// are safe), and clears the package level watcher reference. Idempotent.
+func (w *watcher) Close() {
+    w.mux.Lock()
+    defer w.mux.Unlock()
+    if w.closed {
+        return
+    }
+
+    close(w.quit)
+    w.closed = true
+
+    for _, p := range w.producers {
+        p.Close()
+    }
+
+    for _, c := range w.consumers {
+        c.Close()
+    }
+
+    // Clear the global so a subsequent InitWatcher can create a fresh one.
+    databaseWatcher = nil
+}
+
+// loop blocks until the watcher's quit channel is closed or its context is
+// canceled, then tears the watcher down.
+func (w *watcher) loop() {
+    defer w.Close()
+    select {
+    case <-w.quit:
+    case <-w.ctx.Done():
+    }
+}
diff --git a/database/watcher/watcher_store_test.go b/database/watcher/watcher_store_test.go
new file mode 100644
index 00000000..97fc8a9d
--- /dev/null
+++ b/database/watcher/watcher_store_test.go
@@ -0,0 +1,1105 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package watcher_test
+
+import (
+ "context"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+// WatcherStoreTestSuite exercises the database watcher against a real store,
+// verifying that store mutations emit the expected change payloads.
+type WatcherStoreTestSuite struct {
+    suite.Suite
+
+    // store is the database under test; ctx is the suite-wide context.
+    store common.Store
+    ctx context.Context
+}
+
+// TestJobWatcher verifies that job create, update, lock, unlock and
+// break-lock operations on the store each emit the expected change payload.
+func (s *WatcherStoreTestSuite) TestJobWatcher() {
+    consumer, err := watcher.RegisterConsumer(
+        s.ctx, "job-test",
+        watcher.WithEntityTypeFilter(common.JobEntityType),
+        watcher.WithAny(
+            watcher.WithOperationTypeFilter(common.CreateOperation),
+            watcher.WithOperationTypeFilter(common.UpdateOperation),
+            watcher.WithOperationTypeFilter(common.DeleteOperation)),
+    )
+    s.Require().NoError(err)
+    s.Require().NotNil(consumer)
+    s.T().Cleanup(func() { consumer.Close() })
+    // Drain any events left over from other tests.
+    consumeEvents(consumer)
+
+    jobParams := params.Job{
+        WorkflowJobID: 2,
+        RunID: 2,
+        Action: "test-action",
+        Conclusion: "started",
+        Status: "in_progress",
+        Name: "test-job",
+    }
+
+    job, err := s.store.CreateOrUpdateJob(s.ctx, jobParams)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.JobEntityType,
+            Operation: common.CreateOperation,
+            Payload: job,
+        }, event)
+        asJob, ok := event.Payload.(params.Job)
+        s.Require().True(ok)
+        s.Require().Equal(job.ID, int64(1))
+        s.Require().Equal(asJob.ID, int64(1))
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    job.Conclusion = "success"
+    updatedJob, err := s.store.CreateOrUpdateJob(s.ctx, job)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.JobEntityType,
+            Operation: common.UpdateOperation,
+            Payload: updatedJob,
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    entityID, err := uuid.NewUUID()
+    s.Require().NoError(err)
+
+    // Locking a job should surface as an update event with LockedBy set.
+    err = s.store.LockJob(s.ctx, updatedJob.WorkflowJobID, entityID.String())
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(event.Operation, common.UpdateOperation)
+        s.Require().Equal(event.EntityType, common.JobEntityType)
+
+        job, ok := event.Payload.(params.Job)
+        s.Require().True(ok)
+        s.Require().Equal(job.ID, updatedJob.ID)
+        s.Require().Equal(job.LockedBy, entityID)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    // Unlocking should clear LockedBy back to uuid.Nil.
+    err = s.store.UnlockJob(s.ctx, updatedJob.WorkflowJobID, entityID.String())
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(event.Operation, common.UpdateOperation)
+        s.Require().Equal(event.EntityType, common.JobEntityType)
+
+        job, ok := event.Payload.(params.Job)
+        s.Require().True(ok)
+        s.Require().Equal(job.ID, updatedJob.ID)
+        s.Require().Equal(job.LockedBy, uuid.Nil)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    jobParams.Status = "queued"
+    jobParams.LockedBy = entityID
+
+    updatedJob, err = s.store.CreateOrUpdateJob(s.ctx, jobParams)
+    s.Require().NoError(err)
+    // We don't care about the update event here.
+    consumeEvents(consumer)
+
+    // Breaking the lock on a queued job should also clear LockedBy.
+    err = s.store.BreakLockJobIsQueued(s.ctx, updatedJob.WorkflowJobID)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(event.Operation, common.UpdateOperation)
+        s.Require().Equal(event.EntityType, common.JobEntityType)
+
+        job, ok := event.Payload.(params.Job)
+        s.Require().True(ok)
+        s.Require().Equal(job.ID, updatedJob.ID)
+        s.Require().Equal(uuid.Nil, job.LockedBy)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+}
+
+// TestInstanceWatcher verifies create/update/delete events for pool-backed
+// instances, including the reduced payload emitted on delete.
+func (s *WatcherStoreTestSuite) TestInstanceWatcher() {
+    consumer, err := watcher.RegisterConsumer(
+        s.ctx, "instance-test",
+        watcher.WithEntityTypeFilter(common.InstanceEntityType),
+        watcher.WithAny(
+            watcher.WithOperationTypeFilter(common.CreateOperation),
+            watcher.WithOperationTypeFilter(common.UpdateOperation),
+            watcher.WithOperationTypeFilter(common.DeleteOperation)),
+    )
+    s.Require().NoError(err)
+    s.Require().NotNil(consumer)
+    s.T().Cleanup(func() { consumer.Close() })
+    // Drain any events left over from other tests.
+    consumeEvents(consumer)
+
+    // Set up the fixture chain: endpoint -> credentials -> repo -> pool.
+    ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+    creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+    s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+    repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(repo.ID)
+    s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+    entity, err := repo.GetEntity()
+    s.Require().NoError(err)
+
+    createPoolParams := params.CreatePoolParams{
+        ProviderName: "test-provider",
+        Image: "test-image",
+        Flavor: "test-flavor",
+        OSType: commonParams.Linux,
+        OSArch: commonParams.Amd64,
+        Tags: []string{"test-tag"},
+    }
+
+    pool, err := s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(pool.ID)
+    s.T().Cleanup(func() { s.store.DeleteEntityPool(s.ctx, entity, pool.ID) })
+
+    createInstanceParams := params.CreateInstanceParams{
+        Name: "test-instance",
+        OSType: commonParams.Linux,
+        OSArch: commonParams.Amd64,
+        Status: commonParams.InstanceCreating,
+    }
+    instance, err := s.store.CreateInstance(s.ctx, pool.ID, createInstanceParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(instance.ID)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.CreateOperation,
+            Payload: instance,
+        }, event)
+        asInstance, ok := event.Payload.(params.Instance)
+        s.Require().True(ok)
+        s.Require().Equal(instance.Name, "test-instance")
+        s.Require().Equal(asInstance.Name, "test-instance")
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    updateParams := params.UpdateInstanceParams{
+        RunnerStatus: params.RunnerActive,
+    }
+
+    updatedInstance, err := s.store.UpdateInstance(s.ctx, instance.Name, updateParams)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.UpdateOperation,
+            Payload: updatedInstance,
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    err = s.store.DeleteInstance(s.ctx, pool.ID, updatedInstance.Name)
+    s.Require().NoError(err)
+
+    // Delete events carry a reduced Instance payload (IDs and name only).
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.DeleteOperation,
+            Payload: params.Instance{
+                ID: updatedInstance.ID,
+                Name: updatedInstance.Name,
+                ProviderID: updatedInstance.ProviderID,
+                AgentID: updatedInstance.AgentID,
+                PoolID: updatedInstance.PoolID,
+            },
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+}
+
+// TestScaleSetInstanceWatcher verifies create/update/delete events for scale
+// set backed instances, including the reduced payload emitted on delete.
+func (s *WatcherStoreTestSuite) TestScaleSetInstanceWatcher() {
+    // Fixed: this test previously registered the consumer as "instance-test",
+    // the same id TestInstanceWatcher uses. Consumer deregistration happens
+    // asynchronously after Close, so reusing the id could intermittently fail
+    // with ErrConsumerAlreadyRegistered. Use a unique id instead.
+    consumer, err := watcher.RegisterConsumer(
+        s.ctx, "scaleset-instance-test",
+        watcher.WithEntityTypeFilter(common.InstanceEntityType),
+        watcher.WithAny(
+            watcher.WithOperationTypeFilter(common.CreateOperation),
+            watcher.WithOperationTypeFilter(common.UpdateOperation),
+            watcher.WithOperationTypeFilter(common.DeleteOperation)),
+    )
+    s.Require().NoError(err)
+    s.Require().NotNil(consumer)
+    s.T().Cleanup(func() { consumer.Close() })
+    // Drain any events left over from other tests.
+    consumeEvents(consumer)
+
+    // Set up the fixture chain: endpoint -> credentials -> repo -> scale set.
+    ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+    creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+    s.T().Cleanup(func() { s.store.DeleteGithubCredentials(s.ctx, creds.ID) })
+
+    repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(repo.ID)
+    s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+    entity, err := repo.GetEntity()
+    s.Require().NoError(err)
+
+    createScaleSetParams := params.CreateScaleSetParams{
+        ProviderName: "test-provider",
+        Name: "test-scaleset",
+        Image: "test-image",
+        Flavor: "test-flavor",
+        MinIdleRunners: 0,
+        MaxRunners: 1,
+        OSType: commonParams.Linux,
+        OSArch: commonParams.Amd64,
+    }
+
+    scaleSet, err := s.store.CreateEntityScaleSet(s.ctx, entity, createScaleSetParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(scaleSet.ID)
+    s.T().Cleanup(func() { s.store.DeleteScaleSetByID(s.ctx, scaleSet.ID) })
+
+    createInstanceParams := params.CreateInstanceParams{
+        Name: "test-instance",
+        OSType: commonParams.Linux,
+        OSArch: commonParams.Amd64,
+        Status: commonParams.InstanceCreating,
+    }
+    instance, err := s.store.CreateScaleSetInstance(s.ctx, scaleSet.ID, createInstanceParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(instance.ID)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.CreateOperation,
+            Payload: instance,
+        }, event)
+        asInstance, ok := event.Payload.(params.Instance)
+        s.Require().True(ok)
+        s.Require().Equal(instance.Name, "test-instance")
+        s.Require().Equal(asInstance.Name, "test-instance")
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    updateParams := params.UpdateInstanceParams{
+        RunnerStatus: params.RunnerActive,
+    }
+
+    updatedInstance, err := s.store.UpdateInstance(s.ctx, instance.Name, updateParams)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.UpdateOperation,
+            Payload: updatedInstance,
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    err = s.store.DeleteInstanceByName(s.ctx, updatedInstance.Name)
+    s.Require().NoError(err)
+
+    // Delete events carry a reduced Instance payload (IDs and name only).
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.InstanceEntityType,
+            Operation: common.DeleteOperation,
+            Payload: params.Instance{
+                ID: updatedInstance.ID,
+                Name: updatedInstance.Name,
+                ProviderID: updatedInstance.ProviderID,
+                AgentID: updatedInstance.AgentID,
+                ScaleSetID: updatedInstance.ScaleSetID,
+            },
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+}
+
+// TestPoolWatcher verifies create/update/delete events for pools, covering
+// both DeleteEntityPool and DeletePoolByID code paths.
+func (s *WatcherStoreTestSuite) TestPoolWatcher() {
+    consumer, err := watcher.RegisterConsumer(
+        s.ctx, "pool-test",
+        watcher.WithEntityTypeFilter(common.PoolEntityType),
+        watcher.WithAny(
+            watcher.WithOperationTypeFilter(common.CreateOperation),
+            watcher.WithOperationTypeFilter(common.UpdateOperation),
+            watcher.WithOperationTypeFilter(common.DeleteOperation)),
+    )
+    s.Require().NoError(err)
+    s.Require().NotNil(consumer)
+    s.T().Cleanup(func() { consumer.Close() })
+    // Drain any events left over from other tests.
+    consumeEvents(consumer)
+
+    // Set up the fixture chain: endpoint -> credentials -> repo.
+    ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+    creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+    s.T().Cleanup(func() {
+        if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+            s.T().Logf("failed to delete Github credentials: %v", err)
+        }
+    })
+
+    repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(repo.ID)
+    s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+    entity, err := repo.GetEntity()
+    s.Require().NoError(err)
+
+    createPoolParams := params.CreatePoolParams{
+        ProviderName: "test-provider",
+        Image: "test-image",
+        Flavor: "test-flavor",
+        OSType: commonParams.Linux,
+        OSArch: commonParams.Amd64,
+        Tags: []string{"test-tag"},
+    }
+    pool, err := s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(pool.ID)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.PoolEntityType,
+            Operation: common.CreateOperation,
+            Payload: pool,
+        }, event)
+        asPool, ok := event.Payload.(params.Pool)
+        s.Require().True(ok)
+        s.Require().Equal(pool.Image, "test-image")
+        s.Require().Equal(asPool.Image, "test-image")
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    updateParams := params.UpdatePoolParams{
+        Tags: []string{"updated-tag"},
+    }
+
+    updatedPool, err := s.store.UpdateEntityPool(s.ctx, entity, pool.ID, updateParams)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.PoolEntityType,
+            Operation: common.UpdateOperation,
+            Payload: updatedPool,
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    err = s.store.DeleteEntityPool(s.ctx, entity, pool.ID)
+    s.Require().NoError(err)
+
+    // Delete events carry a reduced Pool payload (ID only).
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.PoolEntityType,
+            Operation: common.DeleteOperation,
+            Payload: params.Pool{ID: pool.ID},
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    // Also test DeletePoolByID
+    pool, err = s.store.CreateEntityPool(s.ctx, entity, createPoolParams)
+    s.Require().NoError(err)
+    s.Require().NotEmpty(pool.ID)
+
+    // Consume the create event
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.PoolEntityType,
+            Operation: common.CreateOperation,
+            Payload: pool,
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+
+    err = s.store.DeletePoolByID(s.ctx, pool.ID)
+    s.Require().NoError(err)
+
+    select {
+    case event := <-consumer.Watch():
+        s.Require().Equal(common.ChangePayload{
+            EntityType: common.PoolEntityType,
+            Operation: common.DeleteOperation,
+            Payload: params.Pool{ID: pool.ID},
+        }, event)
+    case <-time.After(1 * time.Second):
+        s.T().Fatal("expected payload not received")
+    }
+}
+
+// TestScaleSetWatcher verifies that scale set create, update and delete
+// operations — including SetScaleSetLastMessageID and
+// SetScaleSetDesiredRunnerCount — emit matching change events.
+func (s *WatcherStoreTestSuite) TestScaleSetWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "scaleset-test",
+		watcher.WithEntityTypeFilter(common.ScaleSetEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+	creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+	s.T().Cleanup(func() {
+		if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+			s.T().Logf("failed to delete Github credentials: %v", err)
+		}
+	})
+
+	repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(repo.ID)
+	s.T().Cleanup(func() { s.store.DeleteRepository(s.ctx, repo.ID) })
+
+	entity, err := repo.GetEntity()
+	s.Require().NoError(err)
+
+	createScaleSetParams := params.CreateScaleSetParams{
+		ProviderName:   "test-provider",
+		Name:           "test-scaleset",
+		Image:          "test-image",
+		Flavor:         "test-flavor",
+		MinIdleRunners: 0,
+		MaxRunners:     1,
+		OSType:         commonParams.Linux,
+		OSArch:         commonParams.Amd64,
+		Tags:           []string{"test-tag"},
+	}
+	scaleSet, err := s.store.CreateEntityScaleSet(s.ctx, entity, createScaleSetParams)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(scaleSet.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.ScaleSetEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    scaleSet,
+		}, event)
+		asScaleSet, ok := event.Payload.(params.ScaleSet)
+		s.Require().True(ok)
+		// require.Equal takes (expected, actual); expected comes first.
+		s.Require().Equal("test-image", scaleSet.Image)
+		s.Require().Equal("test-image", asScaleSet.Image)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	updateParams := params.UpdateScaleSetParams{
+		Flavor: "updated-flavor",
+	}
+
+	// The update callback receives both the old and the new scale set.
+	callbackFn := func(old, newScaleSet params.ScaleSet) error {
+		s.Require().Equal(old.ID, newScaleSet.ID)
+		s.Require().Equal("test-flavor", old.Flavor)
+		s.Require().Equal("updated-flavor", newScaleSet.Flavor)
+		return nil
+	}
+	updatedScaleSet, err := s.store.UpdateEntityScaleSet(s.ctx, entity, scaleSet.ID, updateParams, callbackFn)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.ScaleSetEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedScaleSet,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.SetScaleSetLastMessageID(s.ctx, updatedScaleSet.ID, 99)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		asScaleSet, ok := event.Payload.(params.ScaleSet)
+		s.Require().True(ok)
+		s.Require().Equal(updatedScaleSet.ID, asScaleSet.ID)
+		s.Require().Equal(int64(99), asScaleSet.LastMessageID)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.SetScaleSetDesiredRunnerCount(s.ctx, updatedScaleSet.ID, 5)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		asScaleSet, ok := event.Payload.(params.ScaleSet)
+		s.Require().True(ok)
+		s.Require().Equal(updatedScaleSet.ID, asScaleSet.ID)
+		s.Require().Equal(5, asScaleSet.DesiredRunnerCount)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteScaleSetByID(s.ctx, scaleSet.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		// We updated last message ID and desired runner count above.
+		updatedScaleSet.DesiredRunnerCount = 5
+		updatedScaleSet.LastMessageID = 99
+		payloadFromEvent, ok := event.Payload.(params.ScaleSet)
+		s.Require().True(ok)
+		// Timestamps and endpoint are not stable across the delete event;
+		// normalize them before comparing the full payload.
+		updatedScaleSet.UpdatedAt = payloadFromEvent.UpdatedAt
+		updatedScaleSet.CreatedAt = payloadFromEvent.CreatedAt
+		updatedScaleSet.Endpoint = params.ForgeEndpoint{}
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.ScaleSetEntityType,
+			Operation:  common.DeleteOperation,
+			Payload:    updatedScaleSet,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestControllerWatcher checks that updating the controller emits an update
+// event carrying the new controller info.
+func (s *WatcherStoreTestSuite) TestControllerWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "controller-test",
+		watcher.WithEntityTypeFilter(common.ControllerEntityType),
+		watcher.WithOperationTypeFilter(common.UpdateOperation),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	newMetadataURL := "http://metadata.example.com"
+	controller, err := s.store.UpdateController(params.UpdateControllerParams{
+		MetadataURL: &newMetadataURL,
+	})
+	s.Require().NoError(err)
+	s.Require().Equal(newMetadataURL, controller.MetadataURL)
+
+	expected := common.ChangePayload{
+		EntityType: common.ControllerEntityType,
+		Operation:  common.UpdateOperation,
+		Payload:    controller,
+	}
+	select {
+	case received := <-consumer.Watch():
+		s.Require().Equal(expected, received)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestEnterpriseWatcher verifies create, update and delete events are
+// emitted for enterprises.
+func (s *WatcherStoreTestSuite) TestEnterpriseWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "enterprise-test",
+		watcher.WithEntityTypeFilter(common.EnterpriseEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+	creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+	// Log (rather than silently ignore) cleanup failures, consistent with
+	// the other watcher tests in this suite.
+	s.T().Cleanup(func() {
+		if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+			s.T().Logf("failed to delete Github credentials: %v", err)
+		}
+	})
+
+	ent, err := s.store.CreateEnterprise(s.ctx, "test-enterprise", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(ent.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.EnterpriseEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    ent,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	updateParams := params.UpdateEntityParams{
+		WebhookSecret: "updated",
+	}
+
+	updatedEnt, err := s.store.UpdateEnterprise(s.ctx, ent.ID, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal("updated", updatedEnt.WebhookSecret)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.EnterpriseEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedEnt,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteEnterprise(s.ctx, ent.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.EnterpriseEntityType,
+			Operation:  common.DeleteOperation,
+			Payload:    updatedEnt,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestOrgWatcher verifies create, update and delete events are emitted for
+// organizations.
+func (s *WatcherStoreTestSuite) TestOrgWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "org-test",
+		watcher.WithEntityTypeFilter(common.OrganizationEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+	creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+	// Log (rather than silently ignore) cleanup failures, consistent with
+	// the other watcher tests in this suite.
+	s.T().Cleanup(func() {
+		if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+			s.T().Logf("failed to delete Github credentials: %v", err)
+		}
+	})
+
+	org, err := s.store.CreateOrganization(s.ctx, "test-org", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(org.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.OrganizationEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    org,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	updateParams := params.UpdateEntityParams{
+		WebhookSecret: "updated",
+	}
+
+	updatedOrg, err := s.store.UpdateOrganization(s.ctx, org.ID, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal("updated", updatedOrg.WebhookSecret)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.OrganizationEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedOrg,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteOrganization(s.ctx, org.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.OrganizationEntityType,
+			Operation:  common.DeleteOperation,
+			Payload:    updatedOrg,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestRepoWatcher verifies create, update and delete events are emitted for
+// repositories.
+func (s *WatcherStoreTestSuite) TestRepoWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "repo-test",
+		watcher.WithEntityTypeFilter(common.RepositoryEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ep := garmTesting.CreateDefaultGithubEndpoint(s.ctx, s.store, s.T())
+	creds := garmTesting.CreateTestGithubCredentials(s.ctx, "test-creds", s.store, s.T(), ep)
+	// Log (rather than silently ignore) cleanup failures, consistent with
+	// the other watcher tests in this suite.
+	s.T().Cleanup(func() {
+		if err := s.store.DeleteGithubCredentials(s.ctx, creds.ID); err != nil {
+			s.T().Logf("failed to delete Github credentials: %v", err)
+		}
+	})
+
+	repo, err := s.store.CreateRepository(s.ctx, "test-owner", "test-repo", creds, "test-secret", params.PoolBalancerTypeRoundRobin)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(repo.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.RepositoryEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    repo,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	newSecret := "updated"
+	updateParams := params.UpdateEntityParams{
+		WebhookSecret: newSecret,
+	}
+
+	updatedRepo, err := s.store.UpdateRepository(s.ctx, repo.ID, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newSecret, updatedRepo.WebhookSecret)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.RepositoryEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedRepo,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteRepository(s.ctx, repo.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.RepositoryEntityType,
+			Operation:  common.DeleteOperation,
+			Payload:    updatedRepo,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestGithubCredentialsWatcher verifies create, update and delete events are
+// emitted for GitHub credentials. The delete event payload carries only the
+// ID and name of the removed credentials.
+func (s *WatcherStoreTestSuite) TestGithubCredentialsWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "gh-cred-test",
+		watcher.WithEntityTypeFilter(common.GithubCredentialsEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ghCredParams := params.CreateGithubCredentialsParams{
+		Name:        "test-creds",
+		Description: "test credentials",
+		Endpoint:    "github.com",
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "bogus",
+		},
+	}
+
+	ghCred, err := s.store.CreateGithubCredentials(s.ctx, ghCredParams)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(ghCred.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubCredentialsEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    ghCred,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	newDesc := "updated description"
+	updateParams := params.UpdateGithubCredentialsParams{
+		Description: &newDesc,
+	}
+
+	updatedGhCred, err := s.store.UpdateGithubCredentials(s.ctx, ghCred.ID, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDesc, updatedGhCred.Description)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubCredentialsEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedGhCred,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteGithubCredentials(s.ctx, ghCred.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubCredentialsEntityType,
+			Operation:  common.DeleteOperation,
+			// We only get the ID and Name of the deleted entity
+			Payload: params.ForgeCredentials{ID: ghCred.ID, Name: ghCred.Name},
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestGiteaCredentialsWatcher verifies create, update and delete events are
+// emitted for Gitea credentials registered against a test Gitea endpoint.
+func (s *WatcherStoreTestSuite) TestGiteaCredentialsWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "gitea-cred-test",
+		watcher.WithEntityTypeFilter(common.GiteaCredentialsEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	testEndpointParams := params.CreateGiteaEndpointParams{
+		Name:        "test",
+		Description: "test endpoint",
+		APIBaseURL:  "https://api.gitea.example.com",
+		BaseURL:     "https://gitea.example.com",
+	}
+
+	testEndpoint, err := s.store.CreateGiteaEndpoint(s.ctx, testEndpointParams)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(testEndpoint.Name)
+
+	s.T().Cleanup(func() {
+		if err := s.store.DeleteGiteaEndpoint(s.ctx, testEndpoint.Name); err != nil {
+			s.T().Logf("failed to delete Gitea endpoint: %v", err)
+		}
+		consumeEvents(consumer)
+	})
+
+	giteaCredParams := params.CreateGiteaCredentialsParams{
+		Name:        "test-creds",
+		Description: "test credentials",
+		Endpoint:    testEndpoint.Name,
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "bogus",
+		},
+	}
+
+	giteaCred, err := s.store.CreateGiteaCredentials(s.ctx, giteaCredParams)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(giteaCred.ID)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GiteaCredentialsEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    giteaCred,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	newDesc := "updated test description"
+	updateParams := params.UpdateGiteaCredentialsParams{
+		Description: &newDesc,
+	}
+
+	updatedGiteaCred, err := s.store.UpdateGiteaCredentials(s.ctx, giteaCred.ID, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDesc, updatedGiteaCred.Description)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GiteaCredentialsEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedGiteaCred,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteGiteaCredentials(s.ctx, giteaCred.ID)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		asCreds, ok := event.Payload.(params.ForgeCredentials)
+		s.Require().True(ok)
+		// require.Equal takes (expected, actual); expected comes first.
+		s.Require().Equal(common.DeleteOperation, event.Operation)
+		s.Require().Equal(common.GiteaCredentialsEntityType, event.EntityType)
+		s.Require().Equal(updatedGiteaCred.ID, asCreds.ID)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestGithubEndpointWatcher verifies create, update and delete events are
+// emitted for GitHub endpoints. The delete event payload carries only the
+// name of the removed endpoint.
+func (s *WatcherStoreTestSuite) TestGithubEndpointWatcher() {
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "gh-ep-test",
+		watcher.WithEntityTypeFilter(common.GithubEndpointEntityType),
+		watcher.WithAny(
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+			watcher.WithOperationTypeFilter(common.UpdateOperation),
+			watcher.WithOperationTypeFilter(common.DeleteOperation)),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	s.T().Cleanup(func() { consumer.Close() })
+	consumeEvents(consumer)
+
+	ghEpParams := params.CreateGithubEndpointParams{
+		Name:          "test",
+		Description:   "test endpoint",
+		APIBaseURL:    "https://api.ghes.example.com",
+		UploadBaseURL: "https://upload.ghes.example.com",
+		BaseURL:       "https://ghes.example.com",
+	}
+
+	ghEp, err := s.store.CreateGithubEndpoint(s.ctx, ghEpParams)
+	s.Require().NoError(err)
+	s.Require().NotEmpty(ghEp.Name)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubEndpointEntityType,
+			Operation:  common.CreateOperation,
+			Payload:    ghEp,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	newDesc := "updated description"
+	updateParams := params.UpdateGithubEndpointParams{
+		Description: &newDesc,
+	}
+
+	updatedGhEp, err := s.store.UpdateGithubEndpoint(s.ctx, ghEp.Name, updateParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDesc, updatedGhEp.Description)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubEndpointEntityType,
+			Operation:  common.UpdateOperation,
+			Payload:    updatedGhEp,
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+
+	err = s.store.DeleteGithubEndpoint(s.ctx, ghEp.Name)
+	s.Require().NoError(err)
+
+	select {
+	case event := <-consumer.Watch():
+		s.Require().Equal(common.ChangePayload{
+			EntityType: common.GithubEndpointEntityType,
+			Operation:  common.DeleteOperation,
+			// We only get the name of the deleted entity
+			Payload: params.ForgeEndpoint{Name: ghEp.Name},
+		}, event)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// consumeEvents drains any events already buffered on the consumer's
+// channel, returning once no event arrives within a short grace period or
+// the channel is closed.
+func consumeEvents(consumer common.Consumer) {
+	for {
+		select {
+		case _, ok := <-consumer.Watch():
+			// Discard the event; stop if the channel was closed.
+			if !ok {
+				return
+			}
+		case <-time.After(20 * time.Millisecond):
+			// No event within the grace period; the channel is drained.
+			return
+		}
+	}
+}
diff --git a/database/watcher/watcher_test.go b/database/watcher/watcher_test.go
new file mode 100644
index 00000000..fcbcc4eb
--- /dev/null
+++ b/database/watcher/watcher_test.go
@@ -0,0 +1,1488 @@
+//go:build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package watcher_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/suite"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/database"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ garmTesting "github.com/cloudbase/garm/internal/testing"
+ "github.com/cloudbase/garm/params"
+)
+
+// WatcherTestSuite exercises the watcher package itself — producer/consumer
+// registration and event filtering — against a SQLite-backed store.
+type WatcherTestSuite struct {
+	suite.Suite
+	// store is the database used by tests that emit real change events.
+	store common.Store
+	// ctx is used for watcher registration. NOTE(review): it is not set in
+	// SetupTest; presumably assigned when the suite is constructed — confirm.
+	ctx context.Context
+}
+
+// SetupTest initializes a fresh watcher and a new SQLite-backed store before
+// every test. NOTE(review): s.ctx is not assigned here; it looks like it is
+// set when the suite is constructed — verify against the suite runner.
+func (s *WatcherTestSuite) SetupTest() {
+	ctx := context.TODO()
+	watcher.InitWatcher(ctx)
+	store, err := database.NewDatabase(ctx, garmTesting.GetTestSqliteDBConfig(s.T()))
+	if err != nil {
+		s.T().Fatalf("failed to create db connection: %s", err)
+	}
+	s.store = store
+}
+
+// TearDownTest releases the store and shuts down the global watcher so the
+// next test starts from a clean slate.
+func (s *WatcherTestSuite) TearDownTest() {
+	s.store = nil
+	if w := watcher.GetWatcher(); w != nil {
+		w.Close()
+		watcher.SetWatcher(nil)
+	}
+}
+
+// TestRegisterConsumerTwiceWillError ensures a consumer ID can only be
+// registered once per watcher lifetime.
+func (s *WatcherTestSuite) TestRegisterConsumerTwiceWillError() {
+	consumer, err := watcher.RegisterConsumer(s.ctx, "test")
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// A second registration under the same ID must be rejected.
+	consumer, err = watcher.RegisterConsumer(s.ctx, "test")
+	s.Require().ErrorIs(err, common.ErrConsumerAlreadyRegistered)
+	s.Require().Nil(consumer)
+}
+
+// TestRegisterProducerTwiceWillError ensures a producer ID can only be
+// registered once per watcher lifetime.
+func (s *WatcherTestSuite) TestRegisterProducerTwiceWillError() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	// A second registration under the same ID must be rejected.
+	producer, err = watcher.RegisterProducer(s.ctx, "test")
+	s.Require().ErrorIs(err, common.ErrProducerAlreadyRegistered)
+	s.Require().Nil(producer)
+}
+
+// TestInitWatcherRanTwiceDoesNotReplaceWatcher ensures InitWatcher is
+// idempotent: calling it again keeps the already-initialized watcher.
+func (s *WatcherTestSuite) TestInitWatcherRanTwiceDoesNotReplaceWatcher() {
+	ctx := context.TODO()
+	currentWatcher := watcher.GetWatcher()
+	s.Require().NotNil(currentWatcher)
+	watcher.InitWatcher(ctx)
+	newWatcher := watcher.GetWatcher()
+	s.Require().Equal(currentWatcher, newWatcher)
+}
+
+// TestRegisterConsumerFailsIfWatcherIsNotInitialized closes the watcher and
+// expects consumer registration to fail with ErrWatcherNotInitialized.
+func (s *WatcherTestSuite) TestRegisterConsumerFailsIfWatcherIsNotInitialized() {
+	s.store = nil
+	currentWatcher := watcher.GetWatcher()
+	currentWatcher.Close()
+
+	consumer, err := watcher.RegisterConsumer(s.ctx, "test")
+	s.Require().Nil(consumer)
+	s.Require().ErrorIs(err, common.ErrWatcherNotInitialized)
+}
+
+// TestRegisterProducerFailsIfWatcherIsNotInitialized closes the watcher and
+// expects producer registration to fail with ErrWatcherNotInitialized.
+func (s *WatcherTestSuite) TestRegisterProducerFailsIfWatcherIsNotInitialized() {
+	s.store = nil
+	currentWatcher := watcher.GetWatcher()
+	currentWatcher.Close()
+
+	producer, err := watcher.RegisterProducer(s.ctx, "test")
+	s.Require().Nil(producer)
+	s.Require().ErrorIs(err, common.ErrWatcherNotInitialized)
+}
+
+// TestProducerAndConsumer checks that a payload sent through a producer is
+// delivered to a consumer whose filters match it.
+func (s *WatcherTestSuite) TestProducerAndConsumer() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityTypeFilter(common.ControllerEntityType),
+		watcher.WithOperationTypeFilter(common.UpdateOperation))
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	payload := common.ChangePayload{
+		EntityType: common.ControllerEntityType,
+		Operation:  common.UpdateOperation,
+		Payload:    "test",
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	// Guard the receive with a timeout so a lost event fails the test
+	// instead of blocking it forever (a bare channel receive never returns
+	// if the event is dropped).
+	select {
+	case receivedPayload := <-consumer.Watch():
+		s.Require().Equal(payload, receivedPayload)
+	case <-time.After(1 * time.Second):
+		s.T().Fatal("expected payload not received")
+	}
+}
+
+// TestConsumeWithFilter checks that a consumer receives events matching its
+// entity/operation filters and never sees non-matching events.
+func (s *WatcherTestSuite) TestConsumeWithFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityTypeFilter(common.ControllerEntityType),
+		watcher.WithOperationTypeFilter(common.UpdateOperation))
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matches both filters: must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.ControllerEntityType,
+		Operation:  common.UpdateOperation,
+		Payload:    "test",
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Wrong operation type: must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.ControllerEntityType,
+		Operation:  common.CreateOperation,
+		Payload:    "test",
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithAnyFilter checks that WithAny delivers events matching either of
+// its sub-filters (controller events, or a specific repository entity) and
+// drops everything else.
+func (s *WatcherTestSuite) TestWithAnyFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithAny(
+			watcher.WithEntityTypeFilter(common.ControllerEntityType),
+			watcher.WithEntityFilter(params.ForgeEntity{
+				EntityType: params.ForgeEntityTypeRepository,
+				Owner:      "test",
+				Name:       "test",
+				ID:         "test",
+			}),
+		))
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matches the first sub-filter (controller entity type).
+	payload := common.ChangePayload{
+		EntityType: common.ControllerEntityType,
+		Operation:  common.UpdateOperation,
+		Payload:    "test",
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Matches the second sub-filter (the watched repository).
+	payload = common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			Owner: "test",
+			Name:  "test",
+			ID:    "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// We're not watching for this repo
+	payload = common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			Owner: "test",
+			Name:  "test",
+			ID:    "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// We're not watching for orgs
+	payload = common.ChangePayload{
+		EntityType: common.OrganizationEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			Owner: "test",
+			Name:  "test",
+			ID:    "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithAllFilter checks that WithAll only delivers events matching every
+// one of its sub-filters (a specific repository AND the create operation).
+func (s *WatcherTestSuite) TestWithAllFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithAll(
+			watcher.WithEntityFilter(params.ForgeEntity{
+				EntityType: params.ForgeEntityTypeRepository,
+				Owner:      "test",
+				Name:       "test",
+				ID:         "test",
+			}),
+			watcher.WithOperationTypeFilter(common.CreateOperation),
+		))
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matches both sub-filters: must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.CreateOperation,
+		Payload: params.Repository{
+			Owner: "test",
+			Name:  "test",
+			ID:    "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Wrong operation type: only one sub-filter matches, so it is dropped.
+	payload = common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			Owner: "test",
+			Name:  "test",
+			ID:    "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// maybeInitController initializes the controller info in the given store if
+// it has not been initialized already; it is a no-op when info exists.
+func maybeInitController(db common.Store) error {
+	_, err := db.ControllerInfo()
+	if err == nil {
+		// Controller already initialized; nothing to do.
+		return nil
+	}
+	if _, initErr := db.InitController(); initErr != nil {
+		return fmt.Errorf("error initializing controller: %w", initErr)
+	}
+	return nil
+}
+
+// TestWithEntityPoolFilterRepository checks that WithEntityPoolFilter only
+// delivers pool events whose RepoID matches the watched repository entity.
+func (s *WatcherTestSuite) TestWithEntityPoolFilterRepository() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeRepository,
+		Owner:      "test",
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityPoolFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Pool belongs to the watched repo: must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:     "test",
+			RepoID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Pool belongs to a different repo: must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:     "test",
+			RepoID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityPoolFilterOrg checks that WithEntityPoolFilter only delivers
+// pool events whose OrgID matches the watched organization entity.
+func (s *WatcherTestSuite) TestWithEntityPoolFilterOrg() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeOrganization,
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityPoolFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Pool belongs to the watched org: must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:    "test",
+			OrgID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Pool belongs to a different org: must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:    "test",
+			OrgID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityPoolFilterEnterprise verifies that WithEntityPoolFilter,
+// scoped to an enterprise entity, delivers pool payloads whose EnterpriseID
+// matches, drops pools belonging to other enterprises, and drops payloads
+// whose concrete type does not match the declared PoolEntityType.
+func (s *WatcherTestSuite) TestWithEntityPoolFilterEnterprise() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeEnterprise,
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityPoolFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching EnterpriseID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:           "test",
+			EnterpriseID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different EnterpriseID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:           "test",
+			EnterpriseID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// Invalid payload for declared entity type
+	payload = common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:           1,
+			EnterpriseID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityPoolFilterBogusEntityType verifies that WithEntityPoolFilter
+// built from an entity with an unknown EntityType matches nothing: all pool
+// payloads are dropped via the filter's default branch.
+func (s *WatcherTestSuite) TestWithEntityPoolFilterBogusEntityType() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		// This should trigger the default branch in the filter and
+		// return false
+		EntityType: params.ForgeEntityType("bogus"),
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityPoolFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	payload := common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:           "test",
+			EnterpriseID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	payload = common.ChangePayload{
+		EntityType: common.PoolEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{
+			ID:           "test",
+			EnterpriseID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityScaleSetFilterRepository verifies that WithEntityScaleSetFilter
+// for a GitHub repository entity delivers scale set payloads whose RepoID
+// matches the entity ID and drops scale sets of other repositories.
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterRepository() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeRepository,
+		Owner:      "test",
+		Name:       "test",
+		ID:         "test",
+		Credentials: params.ForgeCredentials{
+			ForgeType: params.GithubEndpointType,
+		},
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityScaleSetFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching RepoID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:     1,
+			RepoID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different RepoID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:     1,
+			RepoID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityScaleSetFilterOrg verifies that WithEntityScaleSetFilter for a
+// GitHub organization entity delivers scale set payloads whose OrgID matches
+// the entity ID and drops scale sets of other organizations.
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterOrg() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeOrganization,
+		Name:       "test",
+		ID:         "test",
+		Credentials: params.ForgeCredentials{
+			ForgeType: params.GithubEndpointType,
+		},
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityScaleSetFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching OrgID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:    1,
+			OrgID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different OrgID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:    1,
+			OrgID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityScaleSetFilterEnterprise verifies that WithEntityScaleSetFilter
+// for a GitHub enterprise entity delivers scale set payloads whose
+// EnterpriseID matches the entity ID and drops scale sets of other
+// enterprises.
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterEnterprise() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeEnterprise,
+		Name:       "test",
+		ID:         "test",
+		Credentials: params.ForgeCredentials{
+			ForgeType: params.GithubEndpointType,
+		},
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityScaleSetFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching EnterpriseID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:           1,
+			EnterpriseID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different EnterpriseID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:           1,
+			EnterpriseID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityScaleSetFilterBogusEntityType verifies that
+// WithEntityScaleSetFilter built from an entity with an unknown EntityType
+// matches nothing: all scale set payloads are dropped via the filter's
+// default branch.
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterBogusEntityType() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		// This should trigger the default branch in the filter and
+		// return false
+		EntityType: params.ForgeEntityType("bogus"),
+		Name:       "test",
+		ID:         "test",
+		Credentials: params.ForgeCredentials{
+			ForgeType: params.GithubEndpointType,
+		},
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityScaleSetFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:           1,
+			EnterpriseID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:           1,
+			EnterpriseID: "test2",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityScaleSetFilterReturnsFalseForGiteaEndpoints verifies that
+// WithEntityScaleSetFilter drops all scale set payloads when the entity's
+// credentials are for a Gitea endpoint, even when the RepoID matches.
+func (s *WatcherTestSuite) TestWithEntityScaleSetFilterReturnsFalseForGiteaEndpoints() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeRepository,
+		Owner:      "test",
+		Name:       "test",
+		ID:         "test",
+		Credentials: params.ForgeCredentials{
+			ForgeType: params.GiteaEndpointType,
+		},
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityScaleSetFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching RepoID, but Gitea credentials; must still be dropped.
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:     1,
+			RepoID: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityFilterRepository verifies that WithEntityFilter for a
+// repository entity delivers repository payloads whose ID matches the entity
+// and drops repository payloads with a different ID.
+func (s *WatcherTestSuite) TestWithEntityFilterRepository() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeRepository,
+		Owner:      "test",
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching repository ID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			ID:    "test",
+			Name:  "test",
+			Owner: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Same owner/name, different ID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			ID:    "test2",
+			Name:  "test",
+			Owner: "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityFilterOrg verifies that WithEntityFilter for an organization
+// entity delivers organization payloads whose ID matches the entity and drops
+// organization payloads with a different ID.
+func (s *WatcherTestSuite) TestWithEntityFilterOrg() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeOrganization,
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching organization ID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.OrganizationEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Organization{
+			ID:   "test",
+			Name: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different organization ID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.OrganizationEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Organization{
+			ID:   "test2",
+			Name: "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityFilterEnterprise verifies that WithEntityFilter for an
+// enterprise entity delivers enterprise payloads whose ID matches the entity
+// and drops enterprise payloads with a different ID.
+func (s *WatcherTestSuite) TestWithEntityFilterEnterprise() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeEnterprise,
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching enterprise ID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.EnterpriseEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Enterprise{
+			ID:   "test",
+			Name: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different enterprise ID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.EnterpriseEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Enterprise{
+			ID:   "test2",
+			Name: "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityJobFilterRepository verifies that WithEntityJobFilter for a
+// repository entity delivers job payloads whose RepoID matches the entity ID
+// (a UUID) and drops jobs belonging to a different repository.
+func (s *WatcherTestSuite) TestWithEntityJobFilterRepository() {
+	repoUUID, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	repoUUID2, err := uuid.NewUUID()
+	s.Require().NoError(err)
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeRepository,
+		Owner:      "test",
+		Name:       "test",
+		ID:         repoUUID.String(),
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityJobFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Job tied to the watched repo UUID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:     1,
+			Name:   "test",
+			RepoID: &repoUUID,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Job tied to a different repo UUID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:     1,
+			Name:   "test",
+			RepoID: &repoUUID2,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityJobFilterOrg verifies that WithEntityJobFilter for an
+// organization entity delivers job payloads whose OrgID matches the entity ID
+// (a UUID) and drops jobs belonging to a different organization.
+func (s *WatcherTestSuite) TestWithEntityJobFilterOrg() {
+	orgUUID, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	orgUUID2, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeOrganization,
+		Name:       "test",
+		ID:         orgUUID.String(),
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityJobFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Job tied to the watched org UUID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:    1,
+			Name:  "test",
+			OrgID: &orgUUID,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Job tied to a different org UUID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:    1,
+			Name:  "test",
+			OrgID: &orgUUID2,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityJobFilterEnterprise verifies that WithEntityJobFilter for an
+// enterprise entity delivers job payloads whose EnterpriseID matches the
+// entity ID (a UUID) and drops jobs belonging to a different enterprise.
+func (s *WatcherTestSuite) TestWithEntityJobFilterEnterprise() {
+	entUUID, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	entUUID2, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		EntityType: params.ForgeEntityTypeEnterprise,
+		Name:       "test",
+		ID:         entUUID.String(),
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityJobFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Job tied to the watched enterprise UUID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:           1,
+			Name:         "test",
+			EnterpriseID: &entUUID,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Job tied to a different enterprise UUID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:           1,
+			Name:         "test",
+			EnterpriseID: &entUUID2,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithEntityJobFilterBogusEntityType verifies that WithEntityJobFilter
+// built from an entity with an unknown EntityType matches nothing: all job
+// payloads are dropped via the filter's default branch.
+func (s *WatcherTestSuite) TestWithEntityJobFilterBogusEntityType() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	entity := params.ForgeEntity{
+		// This should trigger the default branch in the filter and
+		// return false
+		EntityType: params.ForgeEntityType("bogus"),
+		Name:       "test",
+		ID:         "test",
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithEntityJobFilter(entity),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	payload := common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:           1,
+			Name:         "test",
+			EnterpriseID: nil,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	payload = common.ChangePayload{
+		EntityType: common.JobEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Job{
+			ID:           1,
+			Name:         "test",
+			EnterpriseID: nil,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithNone verifies that a consumer registered with WithNone receives no
+// payloads at all, regardless of the entity type of the notification.
+func (s *WatcherTestSuite) TestWithNone() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithNone(),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	payload := common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			ID:    "test",
+			Name:  "test",
+			Owner: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithUserIDFilter verifies that WithUserIDFilter delivers user payloads
+// whose ID matches the filtered user ID, drops payloads for other users, and
+// drops payloads whose concrete type is not params.User.
+func (s *WatcherTestSuite) TestWithUserIDFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	userID, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	userID2, err := uuid.NewUUID()
+	s.Require().NoError(err)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithUserIDFilter(userID.String()),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching user ID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.UserEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.User{
+			ID: userID.String(),
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different user ID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.UserEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.User{
+			ID: userID2.String(),
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	payload = common.ChangePayload{
+		EntityType: common.UserEntityType,
+		Operation:  common.UpdateOperation,
+		// Declare as user, but payload is a pool. Filter should return false.
+		Payload: params.Pool{},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithForgeCredentialsGithub verifies that WithForgeCredentialsFilter for
+// GitHub credentials delivers matching GitHub credentials payloads, drops
+// Gitea credentials payloads with the same ID, and drops payloads whose
+// concrete type is not params.ForgeCredentials.
+func (s *WatcherTestSuite) TestWithForgeCredentialsGithub() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	creds := params.ForgeCredentials{
+		ForgeType: params.GithubEndpointType,
+		ID:        1,
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithForgeCredentialsFilter(creds),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching GitHub credentials; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.GithubCredentialsEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ForgeCredentials{
+			ForgeType: params.GithubEndpointType,
+			ID:        1,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Same ID but Gitea credentials; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.GiteaCredentialsEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ForgeCredentials{
+			ForgeType: params.GiteaEndpointType,
+			ID:        1,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// Wrong concrete payload type for the declared entity type; dropped.
+	payload = common.ChangePayload{
+		EntityType: common.GiteaCredentialsEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithScaleSetFilter verifies that WithScaleSetFilter delivers scale set
+// payloads whose ID matches the filtered scale set, drops scale sets with a
+// different ID, and drops payloads whose concrete type is not a ScaleSet.
+// NOTE: renamed from "TestWithcaleSetFilter" to fix the missing "S" typo; the
+// testify suite discovers tests by the "Test" prefix, so the rename is safe.
+func (s *WatcherTestSuite) TestWithScaleSetFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	scaleSet := params.ScaleSet{
+		ID: 1,
+	}
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithScaleSetFilter(scaleSet),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Matching scale set ID; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:   1,
+			Name: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Different scale set ID; must be filtered out.
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.ScaleSet{
+			ID:   2,
+			Name: "test",
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// Wrong concrete payload type for the declared entity type; dropped.
+	payload = common.ChangePayload{
+		EntityType: common.ScaleSetEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Pool{},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+}
+
+// TestWithExcludeEntityTypeFilter verifies that WithExcludeEntityTypeFilter
+// drops payloads of the excluded entity type and lets every other entity type
+// through. Only the declared EntityType is inspected, not the payload's
+// concrete type.
+func (s *WatcherTestSuite) TestWithExcludeEntityTypeFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithExcludeEntityTypeFilter(common.RepositoryEntityType),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Excluded entity type; must be dropped.
+	payload := common.ChangePayload{
+		EntityType: common.RepositoryEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			ID:    "test",
+			Name:  "test",
+			Owner: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// Non-excluded entity type; must be delivered.
+	payload = common.ChangePayload{
+		EntityType: common.OrganizationEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Repository{
+			ID:   "test",
+			Name: "test",
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+}
+
+// TestWithInstanceStatusFilter verifies that WithInstanceStatusFilter with
+// multiple statuses delivers instance payloads whose Status is one of the
+// listed values (creating, deleting) and drops instances in any other status.
+func (s *WatcherTestSuite) TestWithInstanceStatusFilter() {
+	producer, err := watcher.RegisterProducer(s.ctx, "test-producer")
+	s.Require().NoError(err)
+	s.Require().NotNil(producer)
+
+	consumer, err := watcher.RegisterConsumer(
+		s.ctx, "test-consumer",
+		watcher.WithInstanceStatusFilter(
+			commonParams.InstanceCreating,
+			commonParams.InstanceDeleting),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(consumer)
+	consumeEvents(consumer)
+
+	// Status in the filter list; must be delivered.
+	payload := common.ChangePayload{
+		EntityType: common.InstanceEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Instance{
+			ID:     "test-instance",
+			Status: commonParams.InstanceCreating,
+		},
+	}
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+
+	receivedPayload := waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+
+	// Status not in the filter list; must be dropped.
+	payload = common.ChangePayload{
+		EntityType: common.InstanceEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Instance{
+			ID:     "test-instance",
+			Status: commonParams.InstanceDeleted,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().Nil(receivedPayload)
+
+	// Second status in the filter list; must also be delivered.
+	payload = common.ChangePayload{
+		EntityType: common.InstanceEntityType,
+		Operation:  common.UpdateOperation,
+		Payload: params.Instance{
+			ID:     "test-instance",
+			Status: commonParams.InstanceDeleting,
+		},
+	}
+
+	err = producer.Notify(payload)
+	s.Require().NoError(err)
+	receivedPayload = waitForPayload(consumer.Watch(), 100*time.Millisecond)
+	s.Require().NotNil(receivedPayload)
+	s.Require().Equal(payload, *receivedPayload)
+}
+
+// TestWatcherTestSuite is the entry point for the watcher tests. It first
+// runs the in-memory WatcherTestSuite, then initializes the global watcher
+// and a SQLite-backed store (with controller info and an admin context) and
+// runs the store-backed WatcherStoreTestSuite against it.
+func TestWatcherTestSuite(t *testing.T) {
+	// Watcher tests
+	watcherSuite := &WatcherTestSuite{
+		ctx: context.TODO(),
+	}
+	suite.Run(t, watcherSuite)
+
+	ctx := context.Background()
+	watcher.InitWatcher(ctx)
+
+	store, err := database.NewDatabase(ctx, garmTesting.GetTestSqliteDBConfig(t))
+	if err != nil {
+		t.Fatalf("failed to create db connection: %s", err)
+	}
+
+	err = maybeInitController(store)
+	if err != nil {
+		t.Fatalf("failed to init controller: %s", err)
+	}
+
+	adminCtx := garmTesting.ImpersonateAdminContext(ctx, store, t)
+	watcherStoreSuite := &WatcherStoreTestSuite{
+		ctx:   adminCtx,
+		store: store,
+	}
+	suite.Run(t, watcherStoreSuite)
+}
diff --git a/doc/building_from_source.md b/doc/building_from_source.md
new file mode 100644
index 00000000..e5d2d0fd
--- /dev/null
+++ b/doc/building_from_source.md
@@ -0,0 +1,87 @@
+# Building GARM from source
+
+The procedure is simple. You will need to have [go](https://golang.org/) installed as well as `make`.
+
+First, clone the repository:
+
+```bash
+git clone https://github.com/cloudbase/garm
+cd garm
+```
+
+Then build garm:
+
+```bash
+make build
+```
+
+You should now have both `garm` and `garm-cli` available in the `./bin` folder.
+
+If you have docker/podman installed, you can also build a static binary against `musl`:
+
+```bash
+make build-static
+```
+
+This command will also build for both AMD64 and ARM64. Resulting binaries will be in the `./bin` folder.
+
+## Hacking
+
+If you're hacking on GARM and want to override the default version GARM injects, you can run the following command:
+
+```bash
+VERSION=v1.0.0 make build
+```
+
+> [!IMPORTANT]
+> This only works for `make build`. The `make build-static` command does not support version overrides.
+
+## The Web UI SPA
+
+GARM now ships with a single page application. The application is written in Svelte and Tailwind CSS. To rebuild it or hack on it, you will need a number of dependencies installed and placed in your `$PATH`.
+
+### Prerequisites
+
+- **Node.js 24+** and **npm**
+- **Go 1.21+** (for building the GARM backend)
+- **openapi-generator-cli** in your PATH (for API client generation)
+
+### Installing openapi-generator-cli
+
+**Option 1: NPM Global Install**
+```bash
+npm install -g @openapitools/openapi-generator-cli
+```
+
+**Option 2: Manual Install**
+Download from [OpenAPI Generator releases](https://github.com/OpenAPITools/openapi-generator/releases) and add to your PATH.
+
+**Verify Installation:**
+
+```bash
+openapi-generator-cli version
+```
+
+
+
+### Hacking on the Web UI
+
+If you need to change something in the `webapp/src` folder, make sure to rebuild the webapp before rebuilding GARM:
+
+```bash
+make build-webui
+make build
+```
+
+> [!IMPORTANT]
+> The Web UI that GARM ships with has `go generate` stanzas that require `@openapitools/openapi-generator-cli` and `tailwindcss` to be installed. You will also have to make sure that if you change API models, the Web UI still works, as adding new fields or changing the json tags of old fields will change accessors in the client code.
+
+### Changing API models
+
+If you need to change the models in the `params/` package, you will also need to regenerate the client both for garm-cli and for the web application we ship with GARM. To do this, you can run:
+
+```bash
+make generate
+```
+
+You will also need to make sure that the web app still works.
diff --git a/doc/config.md b/doc/config.md
new file mode 100644
index 00000000..3c67e1b4
--- /dev/null
+++ b/doc/config.md
@@ -0,0 +1,482 @@
+# Configuration
+
+The ```GARM``` configuration is a simple ```toml``` file. The sample config file in [the testdata folder](/testdata/config.toml) is fairly well commented and should be enough to get you started. The configuration file is split into several sections, each of which is documented in its own page. The sections are:
+
+
+
+- [Configuration](#configuration)
+ - [The default config section](#the-default-config-section)
+ - [The callback_url option](#the-callback_url-option)
+ - [The metadata_url option](#the-metadata_url-option)
+ - [The debug_server option](#the-debug_server-option)
+ - [The log_file option](#the-log_file-option)
+ - [Rotating log files](#rotating-log-files)
+ - [The enable_log_streamer option](#the-enable_log_streamer-option)
+ - [The logging section](#the-logging-section)
+ - [Database configuration](#database-configuration)
+ - [Provider configuration](#provider-configuration)
+ - [Providers](#providers)
+ - [Available external providers](#available-external-providers)
+ - [The metrics section](#the-metrics-section)
+ - [Common metrics](#common-metrics)
+ - [Enterprise metrics](#enterprise-metrics)
+ - [Organization metrics](#organization-metrics)
+ - [Repository metrics](#repository-metrics)
+ - [Provider metrics](#provider-metrics)
+ - [Pool metrics](#pool-metrics)
+ - [Runner metrics](#runner-metrics)
+ - [Github metrics](#github-metrics)
+ - [Enabling metrics](#enabling-metrics)
+ - [Configuring prometheus](#configuring-prometheus)
+ - [The JWT authentication config section](#the-jwt-authentication-config-section)
+ - [The API server config section](#the-api-server-config-section)
+
+
+
+## The default config section
+
+The `default` config section holds configuration options that don't need a category of their own, but are essential to the operation of the service. In this section we will detail each of the options available in the `default` section.
+
+```toml
+[default]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# Enable streaming logs via web sockets. Use garm-cli debug-log.
+enable_log_streamer = false
+
+# Enable the golang debug server. See the documentation in the "doc" folder for more information.
+debug_server = false
+```
+
+### The callback_url option
+
+Your runners will call back home with status updates as they install. Once they are set up, they will also send the GitHub agent ID they were allocated. You will need to configure the ```callback_url``` option in the ```garm``` server config. This URL needs to point to the following API endpoint:
+
+ ```txt
+ POST /api/v1/callbacks/status
+ ```
+
+Example of a runner sending status updates:
+
+ ```bash
+ garm-cli runner show garm-DvxiVAlfHeE7
+ +-----------------+------------------------------------------------------------------------------------+
+ | FIELD | VALUE |
+ +-----------------+------------------------------------------------------------------------------------+
+ | ID | 16b96ba2-d406-45b8-ab66-b70be6237b4e |
+ | Provider ID | garm-DvxiVAlfHeE7 |
+ | Name | garm-DvxiVAlfHeE7 |
+ | OS Type | linux |
+ | OS Architecture | amd64 |
+ | OS Name | ubuntu |
+ | OS Version | jammy |
+ | Status | running |
+ | Runner Status | idle |
+ | Pool ID | 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 |
+ | Addresses | 10.198.117.120 |
+ | Status Updates | 2023-07-08T06:26:46: runner registration token was retrieved |
+ | | 2023-07-08T06:26:46: using cached runner found in /opt/cache/actions-runner/latest |
+ | | 2023-07-08T06:26:50: configuring runner |
+ | | 2023-07-08T06:26:56: runner successfully configured after 1 attempt(s) |
+ | | 2023-07-08T06:26:56: installing runner service |
+ | | 2023-07-08T06:26:56: starting service |
+ | | 2023-07-08T06:26:57: runner successfully installed |
+ +-----------------+------------------------------------------------------------------------------------+
+
+ ```
+
+This URL must be set and must be accessible by the instance. If you wish to restrict access to it, a reverse proxy can be configured to accept requests only from networks in which the runners ```garm``` manages will be spun up. This URL doesn't need to be globally accessible, it just needs to be accessible by the instances.
+
+For example, in a scenario where you expose the API endpoint directly, this setting could look like the following:
+
+ ```toml
+ callback_url = "https://garm.example.com/api/v1/callbacks"
+ ```
+
+Authentication is done using a short-lived JWT token, that gets generated for a particular instance that we are spinning up. That JWT token grants access to the instance to only update its own status and to fetch metadata for itself. No other API endpoints will work with that JWT token. The validity of the token is equal to the pool bootstrap timeout value (default 20 minutes) plus the garm polling interval (5 minutes).
+
+There is a sample ```nginx``` config [in the testdata folder](/testdata/nginx-server.conf). Feel free to customize it in any way you see fit.
+
+### The metadata_url option
+
+The metadata URL is the base URL for any information an instance may need to fetch in order to finish setting itself up. As this URL may be placed behind a reverse proxy, you'll need to configure it in the ```garm``` config file. Ultimately this URL will need to point to the following ```garm``` API endpoint:
+
+ ```bash
+ GET /api/v1/metadata
+ ```
+
+This URL needs to be accessible only by the instances ```garm``` sets up. This URL will not be used by anyone else. To configure it in ```garm``` add the following line in the ```[default]``` section of your ```garm``` config:
+
+ ```toml
+ metadata_url = "https://garm.example.com/api/v1/metadata"
+ ```
+
+### The debug_server option
+
+GARM can optionally enable the golang profiling server. This is useful if you suspect garm may have a bottleneck. To enable the profiling server, add the following section to the garm config:
+
+```toml
+[default]
+
+debug_server = true
+```
+
+And restart garm. You can then use the following command to start profiling:
+
+```bash
+go tool pprof http://127.0.0.1:9997/debug/pprof/profile?seconds=120
+```
+
+> **IMPORTANT NOTE on profiling when behind a reverse proxy**: The above command will hang for a fairly long time. Most reverse proxies will time out after about 60 seconds. To avoid this, you should only profile on localhost by connecting directly to garm.
+
+It's also advisable to exclude the debug server URLs from your reverse proxy and only make them available locally.
+
+Now that the debug server is enabled, here is a blog post on how to profile golang applications: https://blog.golang.org/profiling-go-programs
+
+
+### The log_file option
+
+By default, GARM logs everything to standard output.
+
+You can optionally log to file by adding the following to your config file:
+
+```toml
+[default]
+# Use this if you'd like to log to a file instead of standard output.
+log_file = "/tmp/runner-manager.log"
+```
+
+#### Rotating log files
+
+GARM automatically rotates the log if it reaches 500 MB in size or 28 days, whichever comes first.
+
+However, if you want to manually rotate the log file, you can send a `SIGHUP` signal to the GARM process.
+
+You can add the following to your systemd unit file to enable `reload`:
+
+```ini
+[Service]
+ExecReload=/bin/kill -HUP $MAINPID
+```
+
+Then you can simply:
+
+```bash
+systemctl reload garm
+```
+
+### The enable_log_streamer option
+
+This option allows you to stream garm logs directly to your terminal. Set this option to true, then you can use the following command to stream logs:
+
+```bash
+garm-cli debug-log
+```
+
+An important note on enabling this option when behind a reverse proxy. The log streamer uses websockets to stream logs to you. You will need to configure your reverse proxy to allow websocket connections. If you're using nginx, you will need to add the following to your nginx `server` config:
+
+```nginx
+location /api/v1/ws {
+ proxy_pass http://garm_backend;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+}
+```
+
+## The logging section
+
+GARM has switched to the `slog` package for logging, adding structured logging. As such, we added a dedicated `logging` section to the config to tweak the logging settings. We moved the `enable_log_streamer` and the `log_file` options from the `default` section to the `logging` section. They are still available in the `default` section for backwards compatibility, but they are deprecated and will be removed in a future release.
+
+An example of the new `logging` section:
+
+```toml
+[logging]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# enable_log_streamer enables streaming the logs over websockets
+enable_log_streamer = true
+# log_format is the output format of the logs. GARM uses structured logging and can
+# output as "text" or "json"
+log_format = "text"
+# log_level is the logging level GARM will output. Available log levels are:
+# * debug
+# * info
+# * warn
+# * error
+log_level = "debug"
+# log_source will output information about the function that generated the log line.
+log_source = false
+```
+
+By default GARM logs everything to standard output. You can optionally log to file by adding the `log_file` option to the `logging` section. The `enable_log_streamer` option allows you to stream GARM logs directly to your terminal. Set this option to `true`, then you can use the following command to stream logs:
+
+```bash
+garm-cli debug-log
+```
+
+The `log_format`, `log_level` and `log_source` options allow you to tweak the logging output. The `log_format` option can be set to `text` or `json`. The `log_level` option can be set to `debug`, `info`, `warn` or `error`. The `log_source` option will output information about the function that generated the log line. All these options influence how the structured logging is output.
+
+This will allow you to ingest GARM logs in a central location such as an ELK stack or similar.
+
+## Database configuration
+
+GARM currently supports SQLite3. Support for other stores will be added in the future.
+
+```toml
+[database]
+ # Turn on/off debugging for database queries.
+ debug = false
+ # Database backend to use. Currently supported backends are:
+ # * sqlite3
+ backend = "sqlite3"
+ # the passphrase option is a temporary measure by which we encrypt the webhook
+ # secret that gets saved to the database, using AES256. In the future, secrets
+ # will be saved to something like Barbican or Vault, eliminating the need for
+ # this. This string needs to be 32 characters in size.
+ passphrase = "shreotsinWadquidAitNefayctowUrph"
+ [database.sqlite3]
+ # Path on disk to the sqlite3 database file.
+ db_file = "/home/runner/garm.db"
+```
+
+## Provider configuration
+
+GARM was designed to be extensible. Providers can be written as external executables which implement the needed interface to create/delete/list compute systems that are used by ```GARM``` to create runners.
+
+### Providers
+
+GARM delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. You might find this behavior familiar if you've ever had to deal with installing `CNIs` in `containerd`. The principle is the same.
+
+The configuration for an external provider is quite simple:
+
+```toml
+# This is an example external provider. External providers are executables that
+# implement the needed interface to create/delete/list compute systems that are used
+# by GARM to create runners.
+[[provider]]
+name = "openstack_external"
+description = "external openstack provider"
+provider_type = "external"
+ [provider.external]
+ # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
+ config_file = "/etc/garm/providers.d/openstack/keystonerc"
+ # Absolute path to an executable that implements the provider logic. This executable can be
+ # anything (bash, a binary, python, etc). See documentation in this repo on how to write an
+ # external provider.
+ provider_executable = "/etc/garm/providers.d/openstack/garm-external-provider"
+ # This option will pass all environment variables that start with AWS_ to the provider.
+ # To pass in individual variables, you can add the entire name to the list.
+ environment_variables = ["AWS_"]
+```
+
+The external provider has three options:
+
+* `provider_executable`
+* `config_file`
+* `environment_variables`
+
+The ```provider_executable``` option is the absolute path to an executable that implements the provider logic. GARM will delegate all provider operations to this executable. This executable can be anything (bash, python, perl, go, etc). See [Writing an external provider](./external_provider.md) for more details.
+
+The ```config_file``` option is a path on disk to an arbitrary file, that is passed to the external executable via the environment variable ```GARM_PROVIDER_CONFIG_FILE```. This file is only relevant to the external provider. GARM itself does not read it. Let's take the [OpenStack provider](https://github.com/cloudbase/garm-provider-openstack) as an example. The [config file](https://github.com/cloudbase/garm-provider-openstack/blob/ac46d4d5a542bca96cd0309c89437d3382c3ea26/testdata/config.toml) contains access information for an OpenStack cloud as well as some provider specific options like whether or not to boot from volume and which tenant network to use.
+
+The `environment_variables` option is a list of environment variables that will be passed to the external provider. By default GARM will pass a clean env to providers, consisting only of variables that the [provider interface](./external_provider.md) expects. However, in some situations, providers may need access to certain environment variables set in the env of GARM itself. This might be needed to enable access to IAM roles (ec2) or managed identity (azure). This option takes a list of environment variables or prefixes of environment variables that will be passed to the provider. For example, if you want to pass all environment variables that start with `AWS_` to the provider, you can set this option to `["AWS_"]`.
+
+If you want to implement an external provider, you can use this file for anything you need to pass into the binary when ```GARM``` calls it to execute a particular operation.
+
+#### Available external providers
+
+For non-testing purposes, these are the external providers currently available:
+
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
+
+Details on how to install and configure them are available in their respective repositories.
+
+If you wrote a provider and would like to add it to the above list, feel free to open a PR.
+
+
+## The metrics section
+
+This is one of the features in GARM that I really love having. For one thing, it's community contributed and for another, it really adds value to the project. It allows us to create some pretty nice visualizations of what is happening with GARM.
+
+### Common metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------|
+| `garm_health` | Gauge | `controller_id`=<controller id> `callback_url`=<callback url> `controller_webhook_url`=<controller webhook url> `metadata_url`=<metadata url> `webhook_url`=<webhook url> `name`=<hostname> | This is a gauge that is set to 1 if GARM is healthy and 0 if it is not. This is useful for alerting. |
+| `garm_webhooks_received` | Counter | `valid`=<valid request> `reason`=<reason for invalid requests> | This is a counter that increments every time GARM receives a webhook from GitHub. |
+
+### Enterprise metrics
+
+| Metric name | Type | Labels | Description |
+|---------------------------------------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
+| `garm_enterprise_info` | Gauge | `id`=<enterprise id> `name`=<enterprise name> | This is a gauge that is set to 1 and expose enterprise information |
+| `garm_enterprise_pool_manager_status` | Gauge | `id`=<enterprise id> `name`=<enterprise name> `running`=<true\|false> | This is a gauge that is set to 1 if the enterprise pool manager is running and set to 0 if not |
+
+### Organization metrics
+
+| Metric name | Type | Labels | Description |
+|-----------------------------------------|-------|-----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------|
+| `garm_organization_info` | Gauge | `id`=<organization id> `name`=<organization name> | This is a gauge that is set to 1 and expose organization information |
+| `garm_organization_pool_manager_status` | Gauge | `id`=<organization id> `name`=<organization name> `running`=<true\|false> | This is a gauge that is set to 1 if the organization pool manager is running and set to 0 if not |
+
+### Repository metrics
+
+| Metric name | Type | Labels | Description |
+|---------------------------------------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
+| `garm_repository_info` | Gauge | `id`=<repository id> `name`=<repository name> | This is a gauge that is set to 1 and expose repository information |
+| `garm_repository_pool_manager_status` | Gauge | `id`=<repository id> `name`=<repository name> `running`=<true\|false> | This is a gauge that is set to 1 if the repository pool manager is running and set to 0 if not |
+
+### Provider metrics
+
+| Metric name | Type | Labels | Description |
+|----------------------|-------|-------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------|
+| `garm_provider_info` | Gauge | `description`=<provider description> `name`=<provider name> `type`=<internal\|external> | This is a gauge that is set to 1 and expose provider information |
+
+### Pool metrics
+
+| Metric name | Type | Labels | Description |
+|-------------------------------|-------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------|
+| `garm_pool_info` | Gauge | `flavor`=<flavor> `id`=<pool id> `image`=<image name> `os_arch`=<defined OS arch> `os_type`=<defined OS name> `pool_owner`=<owner name> `pool_type`=<repository\|organization\|enterprise> `prefix`=<prefix> `provider`=<provider name> `tags`=<concatenated list of pool tags> | This is a gauge that is set to 1 and expose pool information |
+| `garm_pool_status` | Gauge | `enabled`=<true\|false> `id`=<pool id> | This is a gauge that is set to 1 if the pool is enabled and set to 0 if not |
+| `garm_pool_bootstrap_timeout` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool bootstrap timeout |
+| `garm_pool_max_runners` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool max runners |
+| `garm_pool_min_idle_runners` | Gauge | `id`=<pool id> | This is a gauge that is set to the pool min idle runners |
+
+### Runner metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
+| `garm_runner_status` | Gauge | `name`=<runner name> `pool_owner`=<owner name> `pool_type`=<repository\|organization\|enterprise> `provider`=<provider name> `runner_status`=<running\|stopped\|error\|pending_delete\|deleting\|pending_create\|creating\|unknown> `status`=<idle\|pending\|terminated\|installing\|failed\|active> | This is a gauge value that gives us details about the runners garm spawns |
+| `garm_runner_operations_total` | Counter | `provider`=<provider name> `operation`=<CreateInstance\|DeleteInstance\|GetInstance\|ListInstances\|RemoveAllInstances\|Start\|Stop> | This is a counter that increments every time a runner operation is performed |
+| `garm_runner_errors_total`     | Counter | `provider`=<provider name> `operation`=<CreateInstance\|DeleteInstance\|GetInstance\|ListInstances\|RemoveAllInstances\|Start\|Stop> | This is a counter that increments every time a runner operation errored      |
+
+### Github metrics
+
+| Metric name | Type | Labels | Description |
+|--------------------------------|---------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|
+| `garm_github_operations_total` | Counter | `operation`=<ListRunners\|CreateRegistrationToken\|...> `scope`=<Organization\|Repository\|Enterprise> | This is a counter that increments every time a github operation is performed |
+| `garm_github_errors_total` | Counter | `operation`=<ListRunners\|CreateRegistrationToken\|...> `scope`=<Organization\|Repository\|Enterprise> | This is a counter that increments every time a github operation errored |
+
+### Enabling metrics
+
+Metrics are disabled by default. To enable them, add the following to your config file:
+
+```toml
+[metrics]
+
+# Toggle to disable authentication (not recommended) on the metrics endpoint.
+# If you do disable authentication, I encourage you to put a reverse proxy in front
+# of garm and limit which systems can access that particular endpoint. Ideally, you
+# would enable some kind of authentication using the reverse proxy, if the built-in auth
+# is not sufficient for your needs.
+#
+# Default: false
+disable_auth = true
+
+# Toggle metrics. If set to false, the API endpoint for metrics collection will
+# be disabled.
+#
+# Default: false
+enable = true
+
+# period is the time interval when the /metrics endpoint will update internal metrics about
+# controller specific objects (e.g. runners, pools, etc.)
+#
+# Default: "60s"
+period = "30s"
+```
+
+You can choose to disable authentication if you wish, however it's not terribly difficult to set up, so I generally advise against disabling it.
+
+### Configuring prometheus
+
+The following section assumes that your garm instance is running at `garm.example.com` and has TLS enabled.
+
+First, generate a new JWT token valid only for the metrics endpoint:
+
+```bash
+garm-cli metrics-token create
+```
+
+Note: The token validity is equal to the TTL you set in the [JWT config section](#the-jwt-authentication-config-section).
+
+Copy the resulting token, and add it to your prometheus config file. The following is an example of how to add garm as a target in your prometheus config file:
+
+```yaml
+scrape_configs:
+ - job_name: "garm"
+ # Connect over https. If you don't have TLS enabled, change this to http.
+ scheme: https
+ static_configs:
+ - targets: ["garm.example.com"]
+ authorization:
+ credentials: "superSecretTokenYouGeneratedEarlier"
+```
+
+## The JWT authentication config section
+
+This section configures the JWT authentication used by the API server. GARM is currently a single user system and that user has the right to do anything and everything GARM is capable of. As a result, the JWT auth we have does not include a refresh token. The token is valid for the duration of the time to live (TTL) set in the config file. Once the token expires, you will need to log in again.
+
+It is recommended that the secret be a long, randomly generated string. Changing the secret at any time will invalidate all existing tokens.
+
+```toml
+[jwt_auth]
+# A JWT token secret used to sign tokens. Obviously, this needs to be changed :).
+secret = ")9gk_4A6KrXz9D2u`0@MPea*sd6W`%@5MAWpWWJ3P3EqW~qB!!(Vd$FhNc*eU4vG"
+
+# Time to live for tokens. Both the instances and you will use JWT tokens to
+# authenticate against the API. However, this TTL is applied only to tokens you
+# get when logging into the API. The tokens issued to the instances we manage,
+# have a TTL based on the runner bootstrap timeout set on each pool. The minimum
+# TTL for this token is 24h.
+time_to_live = "8760h"
+```
+
+## The API server config section
+
+This section allows you to configure the GARM API server. The API server is responsible for serving all the API endpoints used by the `garm-cli`, the runners that phone home their status and by GitHub when it sends us webhooks.
+
+The config options are fairly straightforward.
+
+```toml
+[apiserver]
+ # Bind the API to this IP
+ bind = "0.0.0.0"
+ # Bind the API to this port
+ port = 9997
+ # Whether or not to set up TLS for the API endpoint. If this is set to true,
+ # you must have a valid apiserver.tls section.
+ use_tls = false
+ # Set a list of allowed origins
+ # By default, if this option is omitted or empty, we will check
+ # only that the origin is the same as the originating server.
+ # A literal of "*" will allow any origin
+ cors_origins = ["*"]
+ [apiserver.tls]
+ # Path on disk to a x509 certificate bundle.
+ # NOTE: if your certificate is signed by an intermediary CA, this file
+ # must contain the entire certificate bundle needed for clients to validate
+ # the certificate. This usually means concatenating the certificate and the
+ # CA bundle you received.
+ certificate = ""
+ # The path on disk to the corresponding private key for the certificate.
+ key = ""
+ [apiserver.webui]
+ enable = true
+```
+
+The GARM API server has the option to enable TLS, but I suggest you use a reverse proxy and enable TLS termination in that reverse proxy. There is an `nginx` sample in this repository with TLS termination enabled.
+
+You can of course enable TLS in both garm and the reverse proxy. The choice is yours.
\ No newline at end of file
diff --git a/doc/database.md b/doc/database.md
deleted file mode 100644
index 59204640..00000000
--- a/doc/database.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Database configuration
-
-Garm currently supports two database backends:
-
-* SQLite3
-* MySQL
-
-You can choose either one of these. For most cases, ```SQLite3``` should do, but feel free to go with MySQL if you wish.
-
- ```toml
- [database]
- # Turn on/off debugging for database queries.
- debug = false
- # Database backend to use. Currently supported backends are:
- # * sqlite3
- # * mysql
- backend = "sqlite3"
- # the passphrase option is a temporary measure by which we encrypt the webhook
- # secret that gets saved to the database, using AES256. In the future, secrets
- # will be saved to something like Barbican or Vault, eliminating the need for
- # this.
- passphrase = "n<$n&P#L*TWqOh95_bN5J1r4mhxY7R84HZ%pvM#1vxJ<7~q%YVsCwU@Z60;7~Djo"
- [database.mysql]
- # If MySQL is used, these are the credentials and connection information used
- # to connect to the server instance.
- # database username
- username = ""
- # Database password
- password = ""
- # hostname to connect to
- hostname = ""
- # database name
- database = ""
- [database.sqlite3]
- # Path on disk to the sqlite3 database file.
- db_file = "/home/runner/file.db"
- ```
diff --git a/doc/events.md b/doc/events.md
new file mode 100644
index 00000000..e643a5c2
--- /dev/null
+++ b/doc/events.md
@@ -0,0 +1,256 @@
+# GARM database events
+
+Starting with GARM version `v0.1.5`, we now have a new websocket endpoint that allows us to subscribe to some events that are emitted by the database watcher. Whenever a database entity is created, updated or deleted, the database watcher will notify all interested consumers that an event has occurred and as part of that event, we get a copy of the database entity that was affected.
+
+For example, if a new runner is created, the watcher will emit a `Create` event for the `Instances` entity and in the `Payload` field, we will have a copy of the `Instance` entity that was created. Internally, this will be a golang struct, but when exported via the websocket endpoint, it will be a JSON object, with all sensitive info (passwords, keys, secrets in general) stripped out.
+
+This document will focus on the websocket endpoint and the events that are exported by it.
+
+# Entities and operations
+
+Virtually all database entities are exposed through the events endpoint. These entities are defined in the [database common package](https://github.com/cloudbase/garm/blob/56b0e6065a993fd89c74a8b4ab7de3487544e4e0/database/common/watcher.go#L12-L21). Each of the entity types represents a database table in GARM.
+
+Those entities are:
+
+* `repository` - represents a repository in the database
+* `organization` - represents an organization in the database
+* `enterprise` - represents an enterprise in the database
+* `pool` - represents a pool in the database
+* `user` - represents a user in the database. Currently GARM is not multi tenant so we just have the "admin" user
+* `instance` - represents a runner instance in the database
+* `job` - represents a recorded github workflow job in the database
+* `controller` - represents a controller in the database. This is the GARM controller.
+* `github_credentials` - represents a github credential in the database (PAT, Apps, etc). No sensitive info (token, keys, etc) is ever returned by the events endpoint.
+* `github_endpoint` - represents a github endpoint in the database. This holds the github.com default endpoint and any GHES you may add.
+
+The operations hooked up to the events endpoint and the database watcher are:
+
+* `create` - emitted when a new entity is created
+* `update` - emitted when an entity is updated
+* `delete` - emitted when an entity is deleted
+
+# Event structure
+
+The event structure is defined in the [database common package](https://github.com/cloudbase/garm/blob/56b0e6065a993fd89c74a8b4ab7de3487544e4e0/database/common/watcher.go#L30-L34). The structure for a change payload is marshaled into a JSON object as follows:
+
+```json
+{
+ "entity-type": "repository",
+  "operation": "create",
+ "payload": [object]
+}
+```
+
+Where the `payload` will be a JSON representation of one of the entities defined above. Essentially, you can expect to receive a JSON identical to the one you would get if you made an API call to the GARM REST API for that particular entity.
+
+Note that in some cases, the `delete` operation will return the full object prior to the deletion of the entity, while others will only ever return the `ID` of the entity. This will probably be changed in future releases to only return the `ID` in case of a `delete` operation, for all entities. You should operate under the assumption that in the future, delete operations will only return the `ID` of the entity.
+
+# Subscribing to events
+
+By default the events endpoint returns no events. All events are filtered by default. To start receiving events, you need to emit a message on the websocket connection indicating the entities and/or operations you're interested in.
+
+This gives you the option to get fine grained control over what you receive at any given point in time. Of course, you can opt to receive everything and deal with the potential deluge (depends on how busy your GARM instance is) on your own.
+
+## The filter message
+
+The filter is defined as a JSON that you write over the websocket connections. That JSON must adhere to the following schema:
+
+```json
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://github.com/cloudbase/garm/apiserver/events/options",
+ "$ref": "#/$defs/Options",
+ "$defs": {
+ "Filter": {
+ "properties": {
+ "operations": {
+ "items": {
+ "type": "string",
+ "enum": [
+ "create",
+ "update",
+ "delete"
+ ]
+ },
+ "type": "array",
+ "title": "operations",
+ "description": "A list of operations to filter on"
+ },
+ "entity-type": {
+ "type": "string",
+ "enum": [
+ "repository",
+ "organization",
+ "enterprise",
+ "pool",
+ "user",
+ "instance",
+ "job",
+ "controller",
+ "github_credentials",
+ "gitea_credentials",
+ "github_endpoint",
+ "scaleset"
+ ],
+ "title": "entity type",
+ "description": "The type of entity to filter on",
+ "default": "repository"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object"
+ },
+ "Options": {
+ "properties": {
+ "send-everything": {
+ "type": "boolean",
+ "title": "send everything",
+ "default": false
+ },
+ "filters": {
+ "items": {
+ "$ref": "#/$defs/Filter"
+ },
+ "type": "array",
+ "title": "filters",
+ "description": "A list of filters to apply to the events. This is ignored when send-everything is true"
+ }
+ },
+ "additionalProperties": false,
+ "type": "object"
+ }
+ }
+}
+```
+
+But I realize a JSON schema is not the best way to explain how to use the filter. The following examples should give you a better idea of how to use the filter.
+
+### Example 1: Send all events
+
+```json
+{
+ "send-everything": true
+}
+```
+
+### Example 2: Send only `create` events for `repository` entities
+
+```json
+{
+ "send-everything": false,
+ "filters": [
+ {
+ "entity-type": "repository",
+ "operations": ["create"]
+ }
+ ]
+}
+```
+
+### Example 3: Send `create` and `update` for repositories and `delete` for instances
+
+```json
+{
+ "send-everything": false,
+ "filters": [
+ {
+ "entity-type": "repository",
+ "operations": ["create", "update"]
+ },
+ {
+ "entity-type": "instance",
+ "operations": ["delete"]
+ }
+ ]
+}
+```
+
+## Connecting to the events endpoint
+
+You can use any websocket client, written in any programming language to interact with the events endpoint. In the following example I'll show you how to do it from go.
+
+Before we start, we'll need a JWT token to access the events endpoint. Normally, if you use the CLI, you should have it in your `~/.local/share/garm-cli` folder. But if you know your username and password, we can fetch a fresh one using `curl`:
+
+```bash
+# Read the password from the terminal
+read -s PASSWD
+
+# Get the token
+curl -s -X POST -d '{"username": "admin", "password": "'$PASSWD'"}' \
+ https://garm.example.com/api/v1/auth/login | jq -r .token
+```
+
+Save the token, we'll need it for later.
+
+Now, let's write a simple go program that connects to the events endpoint and subscribes to all events. We'll use the reader that was added to [`garm-provider-common`](https://github.com/cloudbase/garm-provider-common) in version `v0.1.3`, to make this easier:
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ garmWs "github.com/cloudbase/garm-provider-common/util/websocket"
+ "github.com/gorilla/websocket"
+)
+
+// List of signals to interrupt the program
+var signals = []os.Signal{
+ os.Interrupt,
+ syscall.SIGTERM,
+}
+
+// printToConsoleHandler is a simple function that prints the message to the console.
+// In a real world implementation, you can use this function to decide how to properly
+// handle the events.
+func printToConsoleHandler(_ int, msg []byte) error {
+ fmt.Println(string(msg))
+ return nil
+}
+
+func main() {
+ // Set up the context to listen for signals.
+ ctx, stop := signal.NotifyContext(context.Background(), signals...)
+ defer stop()
+
+ // This is the JWT token you got from the curl command above.
+ token := "superSecretJWTToken"
+ // The base URL of your GARM server
+ baseURL := "https://garm.example.com"
+ // This is the path to the events endpoint
+ pth := "/api/v1/ws/events"
+
+ // Instantiate the websocket reader
+ reader, err := garmWs.NewReader(ctx, baseURL, pth, token, printToConsoleHandler)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Start the loop.
+ if err := reader.Start(); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Set the filter to receive all events. You can use a more fine grained filter if you wish.
+ reader.WriteMessage(websocket.TextMessage, []byte(`{"send-everything":true}`))
+
+ fmt.Println("Listening for events. Press Ctrl+C to stop.")
+ // Wait for the context to be done.
+ <-ctx.Done()
+}
+```
+
+If you run this program and change something in the GARM database, you should see the event being printed to the console:
+
+```bash
+gabriel@rossak:/tmp/ex$ go run ./main.go
+{"entity-type":"pool","operation":"update","payload":{"runner_prefix":"garm","id":"8ec34c1f-b053-4a5d-80d6-40afdfb389f9","provider_name":"lxd","max_runners":10,"min_idle_runners":0,"image":"ubuntu:22.04","flavor":"default","os_type":"linux","os_arch":"amd64","tags":[{"id":"76781c93-e354-402e-907a-785caab36207","name":"self-hosted"},{"id":"2ff4a89e-e3b4-4e78-b977-6c21e83cca3d","name":"x64"},{"id":"5b3ffec6-0402-4322-b2a9-fa7f692bbc00","name":"Linux"},{"id":"e95e106d-1a3d-11ee-bd1d-00163e1f621a","name":"ubuntu"},{"id":"3b54ae6c-5e9b-4a81-8e6c-0f78a7b37b04","name":"repo"}],"enabled":true,"instances":[],"repo_id":"70227434-e7c0-4db1-8c17-e9ae3683f61e","repo_name":"gsamfira/scripts","runner_bootstrap_timeout":20,"extra_specs":{"disable_updates":true,"enable_boot_debug":true},"github-runner-group":"","priority":10}}
+```
+
+In the above example, you can see an `update` event on a `pool` entity. The `payload` field contains the full, updated `pool` entity.
diff --git a/doc/external_provider.md b/doc/external_provider.md
index 84d524d9..70b0374d 100644
--- a/doc/external_provider.md
+++ b/doc/external_provider.md
@@ -1,31 +1,31 @@
# Writing an external provider
-External provider enables you to write a fully functional provider, using any scripting or programming language. Garm will call your executable to manage the lifecycle of the instances hosting the runners. This document describes the API that an executable needs to implement to be usable by ```garm```.
+External provider enables you to write a fully functional provider, using any scripting or programming language. Garm will call your executable to manage the lifecycle of the instances hosting the runners. This document describes the API that an executable needs to implement to be usable by `garm`.
## Environment variables
-When ```garm``` calls your executable, a number of environment variables are set, depending on the operation. There are three environment variables that will always be set regardless of operation. Those variables are:
+When `garm` calls your executable, a number of environment variables are set, depending on the operation. There are three environment variables that will always be set regardless of operation. Those variables are:
-* ```GARM_COMMAND```
-* ```GARM_PROVIDER_CONFIG_FILE```
-* ```GARM_CONTROLLER_ID```
+* `GARM_COMMAND`
+* `GARM_PROVIDER_CONFIG_FILE`
+* `GARM_CONTROLLER_ID`
The following are variables that are specific to some operations:
-* ```GARM_POOL_ID```
-* ```GARM_INSTANCE_ID```
+* `GARM_POOL_ID`
+* `GARM_INSTANCE_ID`
### The GARM_COMMAND variable
-The ```GARM_COMMAND``` environment variable will be set to one of the operations defined in the interface. When your executable is called, you'll need to inspect this variable to know which operation you need to execute.
+The `GARM_COMMAND` environment variable will be set to one of the operations defined in the interface. When your executable is called, you'll need to inspect this variable to know which operation you need to execute.
### The GARM_PROVIDER_CONFIG_FILE variable
-The ```GARM_PROVIDER_CONFIG_FILE``` variable will contain a path on disk to a file that can contain whatever configuration your executable needs. For example, in the case of the [sample OpenStack external provider](../contrib/providers.d/openstack/keystonerc), this file contains variables that you would normally find in a ```keystonerc``` file, used to access an OpenStack cloud. But you can use it to add any extra configuration you need.
+The `GARM_PROVIDER_CONFIG_FILE` variable will contain a path on disk to a file that can contain whatever configuration your executable needs. For example, in the case of the [OpenStack external provider](https://github.com/cloudbase/garm-provider-openstack), this file is a toml which contains provider specific configuration options. The provider author decides what this file needs to contain for the provider to function properly.
-The config is opaque to ```garm``` itself. It only has meaning for your external provider.
+GARM does not read this file in any way. It is simply passed to the executable via the environment variable.
-In your executable, you could implement something like this:
+The OpenStack provider mentioned above is written in Go, but it doesn't need to be. For example, if your provider is written in BASH, handling the config file could look something like this:
```bash
if [ -f "${GARM_PROVIDER_CONFIG_FILE}" ];then
@@ -54,37 +54,37 @@ esac
### The GARM_CONTROLLER_ID variable
-The ```GARM_CONTROLLER_ID``` variable is set for all operations.
+The `GARM_CONTROLLER_ID` variable is set for all operations.
When garm first starts up, it generates a unique ID that identifies it as an instance. This ID is passed to the provider and should always be used to tag resources in whichever cloud you write your provider for. This ensures that if you have multiple garm installations, one particular deployment of garm will never touch any resources it did not create.
-In most clouds you can attach ```tags``` to resources. You can use the controller ID as one of the tags during the ```CreateInstance``` operation.
+In most clouds you can attach `tags` to resources. You can use the controller ID as one of the tags during the `CreateInstance` operation.
### The GARM_POOL_ID variable
-The ```GARM_POOL_ID``` environment variable is a ```UUID4``` describing the pool in which a runner is created. This variable is set in two operations:
+The `GARM_POOL_ID` environment variable is a `UUID4` describing the pool in which a runner is created. This variable is set in two operations:
* CreateInstance
* ListInstances
-As with the ```GARM_CONTROLLER_ID```, this ID **must** also be attached as a tag or whichever mechanism your target cloud supports, to identify the pool to which the resources (in most cases the VMs) belong to.
+As with the `GARM_CONTROLLER_ID`, this ID **must** also be attached as a tag or whichever mechanism your target cloud supports, to identify the pool to which the resources (in most cases the VMs) belong to.
### The GARM_INSTANCE_ID variable
-The ```GARM_INSTANCE_ID``` environment variable is used in four operations:
+The `GARM_INSTANCE_ID` environment variable is used in four operations:
* GetInstance
* DeleteInstance
* Start
* Stop
-It contains the ```provider_id``` of the instance. The ```provider_id``` is a unique identifier, specific to the IaaS in which the compute resource was created. In OpenStack, it's an ```UUID4```, while in LXD, it's the virtual machine's name.
+It contains the `provider_id` of the instance. The `provider_id` is a unique identifier, specific to the IaaS in which the compute resource was created. In OpenStack, it's an `UUID4`, while in LXD, it's the virtual machine's name.
We need this ID whenever we need to execute an operation that targets one specific runner.
## Operations
-The operations that a provider must implement are described in the ```Provider``` [interface available here](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/interface.go#L9-L27). The external provider implements this interface, and delegates each operation to your external executable. [These operations are](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/commands.go#L5-L13):
+The operations that a provider must implement are described in the `Provider` [interface available here](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/interface.go#L9-L27). The external provider implements this interface, and delegates each operation to your external executable. [These operations are](https://github.com/cloudbase/garm/blob/223477c4ddfb6b6f9079c444d2f301ef587f048b/runner/providers/external/execution/commands.go#L5-L13):
* CreateInstance
* DeleteInstance
@@ -96,30 +96,30 @@ The operations that a provider must implement are described in the ```Provider``
## CreateInstance
-The ```CreateInstance``` command has the most moving parts. The ideal external provider is one that will create all required resources for a fully functional instance, will start the instance. Waiting for the instance to start is not necessary. If the instance can reach the ```callback_url``` configured in ```garm```, it will update it's own status when it starts running the userdata script.
+The `CreateInstance` command has the most moving parts. The ideal external provider is one that will create all required resources for a fully functional instance, will start the instance. Waiting for the instance to start is not necessary. If the instance can reach the `callback_url` configured in `garm`, it will update it's own status when it starts running the userdata script.
But aside from creating resources, the ideal external provider is also idempotent, and will clean up after itself in case of failure. If for any reason the executable will fail to create the instance, any dependency that it has created up to the point of failure, should be cleaned up before returning an error code.
-At the very least, it must be able to clean up those resources, if it is called with the ```DeleteInstance``` command by ```garm```. Garm will retry creating a failed instance. Before it tries again, it will attempt to run a ```DeleteInstance``` using the ```provider_id``` returned by your executable.
+At the very least, it must be able to clean up those resources, if it is called with the `DeleteInstance` command by `garm`. Garm will retry creating a failed instance. Before it tries again, it will attempt to run a `DeleteInstance` using the `provider_id` returned by your executable.
-If your executable failed before a ```provider_id``` could be supplied, ```garm``` will send the name of the instance as a ```GARM_INSTANCE_ID``` environment variable.
+If your executable failed before a `provider_id` could be supplied, `garm` will send the name of the instance as a `GARM_INSTANCE_ID` environment variable.
-Your external provider will need to be able to handle both. The instance name generated by ```garm``` will be unique, so it's fairly safe to use when deleting instances.
+Your external provider will need to be able to handle both. The instance name generated by `garm` will be unique, so it's fairly safe to use when deleting instances.
### CreateInstance inputs
-The ```CreateInstance``` command is the only command that needs to handle standard input. Garm will send the runner bootstrap information in stdin. The environment variables set for this command are:
+The `CreateInstance` command is the only command that needs to handle standard input. Garm will send the runner bootstrap information in stdin. The environment variables set for this command are:
* GARM_PROVIDER_CONFIG_FILE - Config file specific to your executable
* GARM_COMMAND - the command we need to run
-* GARM_CONTROLLER_ID - The unique ID of the ```garm``` installation
+* GARM_CONTROLLER_ID - The unique ID of the `garm` installation
* GARM_POOL_ID - The unique ID of the pool this node is a part of
-The information sent in via standard input is a ```json``` serialized instance of the [BootstrapInstance structure](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L164-L217)
+The information sent in via standard input is a `json` serialized instance of the [BootstrapInstance structure](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L164-L217)
Here is a sample of that:
- ```json
+```json
{
"name": "garm-ny9HeeQYw2rl",
"tools": [
@@ -174,7 +174,7 @@ Here is a sample of that:
}
],
"repo_url": "https://github.com/gabriel-samfira/scripts",
- "callback-url": "https://garm.example.com/api/v1/callbacks/status",
+ "callback-url": "https://garm.example.com/api/v1/callbacks",
"metadata-url": "https://garm.example.com/api/v1/metadata",
"instance-token": "super secret JWT token",
"extra_specs": {
@@ -188,46 +188,43 @@ Here is a sample of that:
"image": "8ed8a690-69b6-49eb-982f-dcb466895e2d",
"labels": [
"ubuntu",
- "self-hosted",
- "x64",
- "linux",
"openstack",
"runner-controller-id:f9286791-1589-4f39-a106-5b68c2a18af4",
"runner-pool-id:9dcf590a-1192-4a9c-b3e4-e0902974c2c0"
],
"pool_id": "9dcf590a-1192-4a9c-b3e4-e0902974c2c0"
}
- ```
+```
In your executable you can read in this blob, by using something like this:
- ```bash
+```bash
# Test if the stdin file descriptor is opened
if [ ! -t 0 ]
then
# Read in the information from standard in
INPUT=$(cat -)
fi
- ```
+```
-Then you can easily parse it. If you're using ```bash```, you can use the amazing [jq json processor](https://stedolan.github.io/jq/). Other programming languages have suitable libraries that can handle ```json```.
+Then you can easily parse it. If you're using `bash`, you can use the amazing [jq json processor](https://stedolan.github.io/jq/). Other programming languages have suitable libraries that can handle `json`.
You will have to parse the bootstrap params, verify that the requested image exists, gather operating system information, CPU architecture information and using that information, you will need to select the appropriate tools for the arch/OS combination you are deploying.
Refer to the OpenStack or Azure providers available in the [providers.d](../contrib/providers.d/) folder. Of particular interest are the [cloudconfig folders](../contrib/providers.d/openstack/cloudconfig/), where the instance user data templates are stored. These templates are used to generate the needed automation for the instances to download the github runner agent, send back status updates (including the final github runner agent ID), and download the github runner registration token from garm.
-Examples of external providers written in Go can be found at the followinf locations:
+Examples of external providers written in Go can be found at the following locations:
*
*
### CreateInstance outputs
-On success, your executable is expected to print to standard output a json that can be deserialized into an ```Instance{}``` structure [defined here](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L90-L154).
+On success, your executable is expected to print to standard output a json that can be deserialized into an `Instance{}` structure [defined here](https://github.com/cloudbase/garm/blob/6b3ea50ca54501595e541adde106703d289bb804/params/params.go#L90-L154).
Not all fields are expected to be populated by the provider. The ones that should be set are:
- ```json
+```json
{
"provider_id": "88818ff3-1fca-4cb5-9b37-84bfc3511ea6",
"name": "garm-ny9HeeQYw2rl",
@@ -239,17 +236,17 @@ Not all fields are expected to be populated by the provider. The ones that shoul
"pool_id": "41c4a43a-acee-493a-965b-cf340b2c775d",
"provider_fault": ""
}
- ```
+```
-In case of error, ```garm``` expects at the very least to see a non-zero exit code. If possible, your executable should return as much information as possible via the above ```json```, with the ```status``` field set to ```error``` and the ```provider_fault``` set to a meaningful error message describing what has happened. That information will be visible when doing a:
+In case of error, `garm` expects at the very least to see a non-zero exit code. If possible, your executable should return as much information as possible via the above `json`, with the `status` field set to `error` and the `provider_fault` set to a meaningful error message describing what has happened. That information will be visible when doing a:
- ```bash
+```bash
garm-cli runner show
- ```
+```
## DeleteInstance
-The ```DeleteInstance``` command will permanently remove an instance from the cloud provider.
+The `DeleteInstance` command will permanently remove an instance from the cloud provider.
The environment variables set for this command are:
@@ -258,13 +255,13 @@ The environment variables set for this command are:
* GARM_INSTANCE_ID
* GARM_PROVIDER_CONFIG_FILE
-This command is not expected to output anything. On success it should simply ```exit 0```.
+This command is not expected to output anything. On success it should simply `exit 0`.
If the target instance does not exist in the provider, this command is expected to be a no-op.
## GetInstance
-The ```GetInstance``` command will return details about the instance, as seen by the provider.
+The `GetInstance` command will return details about the instance, as seen by the provider.
The environment variables set for this command are:
@@ -273,13 +270,13 @@ The environment variables set for this command are:
* GARM_INSTANCE_ID
* GARM_PROVIDER_CONFIG_FILE
-On success, this command is expected to return a valid ```json``` that can be deserialized into an ```Instance{}``` structure (see CreateInstance). If possible, IP addresses allocated to the VM should be returned in addition to the sample ```json``` printed above.
+On success, this command is expected to return a valid `json` that can be deserialized into an `Instance{}` structure (see CreateInstance). If possible, IP addresses allocated to the VM should be returned in addition to the sample `json` printed above.
On failure, this command is expected to return a non-zero exit code.
## ListInstances
-The ```ListInstances``` command will print to standard output, a json that is deserializable into an **array** of ```Instance{}```.
+The `ListInstances` command will print to standard output, a json that is deserializable into an **array** of `Instance{}`.
The environment variables set for this command are:
@@ -288,15 +285,15 @@ The environment variables set for this command are:
* GARM_PROVIDER_CONFIG_FILE
* GARM_POOL_ID
-This command must list all instances that have been tagged with the value in ```GARM_POOL_ID```.
+This command must list all instances that have been tagged with the value in `GARM_POOL_ID`.
-On success, a ```json``` is expected on standard output.
+On success, a `json` is expected on standard output.
On failure, a non-zero exit code is expected.
## RemoveAllInstances
-The ```RemoveAllInstances``` operation will remove all resources created in a cloud that have been tagged with the ```GARM_CONTROLLER_ID```. External providers should tag all resources they create with the garm controller ID. That tag can then be used to identify all resources when attempting to delete all instances.
+The `RemoveAllInstances` operation will remove all resources created in a cloud that have been tagged with the `GARM_CONTROLLER_ID`. External providers should tag all resources they create with the garm controller ID. That tag can then be used to identify all resources when attempting to delete all instances.
The environment variables set for this command are:
@@ -312,7 +309,7 @@ Note: This command is currently not used by garm.
## Start
-The ```Start``` operation will start the virtual machine in the selected cloud.
+The `Start` operation will start the virtual machine in the selected cloud.
The environment variables set for this command are:
@@ -327,9 +324,9 @@ On failure, a non-zero exit code is expected.
## Stop
-NOTE: This operation is currently not use by ```garm```, but should be implemented.
+NOTE: This operation is currently not used by `garm`, but should be implemented.
-The ```Stop``` operation will stop the virtual machine in the selected cloud.
+The `Stop` operation will stop the virtual machine in the selected cloud.
Available environment variables:
diff --git a/doc/extra_specs.md b/doc/extra_specs.md
new file mode 100644
index 00000000..859b1fbd
--- /dev/null
+++ b/doc/extra_specs.md
@@ -0,0 +1,8 @@
+# ExtraSpecs
+
+ExtraSpecs is an opaque raw json that gets sent to the provider as part of the bootstrap params for instances. It can contain any kind of data needed by providers. The contents of this field means nothing to garm itself. We don't act on the information in this field at all. We only validate that it's a proper json.
+
+However, during the installation phase of the runners, GARM providers can leverage the information set in this field to augment the process in many ways. This can be used for anything ranging from overriding provider config values, to supplying a different runner install template, to passing in information that is relevant only to specific providers.
+
+For example, the [external OpenStack provider](https://github.com/cloudbase/garm-provider-openstack) uses this to [override](https://github.com/cloudbase/garm-provider-openstack#tweaking-the-provider) things like `security groups`, `storage backends`, `network ids`, etc.
+
diff --git a/doc/gitea.md b/doc/gitea.md
new file mode 100644
index 00000000..72d3a202
--- /dev/null
+++ b/doc/gitea.md
@@ -0,0 +1,358 @@
+# Using GARM with Gitea
+
+Starting with Gitea 1.24 and the latest version of GARM (upcoming v0.2.0 - currently `main`), GARM supports Gitea as a forge, side by side with GitHub/GHES. A new endpoint type has been added to represent Gitea instances, which you can configure and use along side your GitHub runners.
+
+You can essentially create runners for both GitHub and Gitea using the same GARM instance, using the same CLI and the same API. It's simply a matter of adding an endpoint and credentials. The rest is the same as for GitHub.
+
+## Quickstart
+
+This is for testing purposes only. We'll assume you're running on an Ubuntu 24.04 VM or server. You can use anything you'd like, but this quickstart is tailored to get you up and running with the LXD provider. So we'll:
+
+* Initialize LXD
+* Create a docker compose yaml
+* Deploy Gitea and GARM
+* Configure GARM to use Gitea
+
+You will have to install Docker-CE yourself.
+
+### Initialize LXD
+
+If you already have LXD initialized, you can skip this step. Otherwise, simply run:
+
+```bash
+sudo lxd init --auto
+```
+
+This should set up LXD with default settings that should work on any system.
+
+LXD and Docker sometimes have issues with networking due to some conflicting iptables rules. In most cases, if you have docker installed and notice that you don't have access to the outside world from the containers, run the following command:
+
+```bash
+sudo iptables -I DOCKER-USER -j ACCEPT
+```
+
+### Create the docker compose
+
+Create a docker compose file in `$HOME/compose.yaml`. This docker compose will deploy both gitea and GARM. If you already have a Gitea >=1.24.0, you can edit this docker compose to only deploy GARM.
+
+```yaml
+networks:
+ default:
+ external: false
+
+services:
+ gitea:
+ image: docker.gitea.com/gitea:1.24.0-rc0
+ container_name: gitea
+ environment:
+ - USER_UID=1000
+ - USER_GID=1000
+ restart: always
+ networks:
+ - default
+ volumes:
+ - /etc/gitea/gitea:/data
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ ports:
+ - "80:80"
+ - "22:22"
+ garm:
+ image: ghcr.io/cloudbase/garm:${GARM_VERSION:-nightly}
+ container_name: garm
+ environment:
+ - USER_UID=1000
+ - USER_GID=1000
+ restart: always
+ networks:
+ - default
+ volumes:
+ - /etc/garm:/etc/garm
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ # Give GARM access to the LXD socket. We need this later in the LXD provider.
+ - /var/snap/lxd/common/lxd/unix.socket:/var/snap/lxd/common/lxd/unix.socket
+ ports:
+ - "9997:9997"
+```
+
+Create the folders for Gitea and GARM:
+
+```bash
+sudo mkdir -p /etc/gitea /etc/garm
+sudo chown 1000:1000 /etc/gitea /etc/garm
+```
+
+Create the GARM configuration file:
+
+```bash
+
+sudo tee /etc/garm/config.toml << EOF
+[jwt_auth]
+# Obviously, this needs to be changed :).
+secret = ">_YE+$%d+O;BMDqnaB)`U4_*iF8snEpEszPyg4N*lI&"
+time_to_live = "8760h"
+
+[apiserver]
+ bind = "0.0.0.0"
+ port = 9997
+ use_tls = false
+
+[database]
+ backend = "sqlite3"
+ # This needs to be changed.
+ passphrase = "OsawnUlubmuHontamedOdVurwetEymni"
+ [database.sqlite3]
+ db_file = "/etc/garm/garm.db"
+
+# This enables the LXD provider. There are other providers available in the image
+# in /opt/garm/providers.d. Feel free to use them as well.
+[[provider]]
+ name = "lxd_local"
+ provider_type = "external"
+ description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "/opt/garm/providers.d/garm-provider-lxd"
+ config_file = "/etc/garm/garm-provider-lxd.toml"
+EOF
+```
+
+Create the LXD provider config file:
+
+```bash
+sudo tee /etc/garm/garm-provider-lxd.toml << EOF
-Garm uses a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) to create runner registration tokens, list current self hosted runners and potentially remove them if they become orphaned (the VM was manually removed on the provider).
+- [Configuring github endpoints and credentials](#configuring-github-endpoints-and-credentials)
+ - [Create GitHub endpoint](#create-github-endpoint)
+ - [Listing GitHub endpoints](#listing-github-endpoints)
+ - [Adding GitHub credentials](#adding-github-credentials)
+ - [Listing GitHub credentials](#listing-github-credentials)
+ - [Deleting GitHub credentials](#deleting-github-credentials)
-From the list of scopes, you will need to select:
+
+
+## Create GitHub endpoint
+
+To create a new GitHub endpoint, you can use the following command:
+
+```bash
+garm-cli github endpoint create \
+ --name example \
+ --description "Just an example ghes endpoint" \
+ --base-url https://ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem
+```
+
+## Listing GitHub endpoints
+
+To list the available GitHub endpoints, you can use the following command:
+
+```bash
+ubuntu@garm:~/garm$ garm-cli github endpoint list
++------------+--------------------------+-------------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------------+-------------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------------+-------------------------------+
+| example | https://ghes.example.com | Just an example ghes endpoint |
++------------+--------------------------+-------------------------------+
+```
+
+## Adding GitHub credentials
+
+GARM has the option to use both [Personal Access Tokens (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) or a [GitHub App](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app).
+
+
+If you'll use a PAT (classic), you'll have to grant access for the following scopes:
* ```public_repo``` - for access to a repository
* ```repo``` - for access to a private repository
* ```admin:org``` - if you plan on using this with an organization to which you have access
* ```manage_runners:enterprise``` - if you plan to use garm at the enterprise level
+* ```admin:repo_hook``` - if you want to allow GARM to install webhooks on repositories (optional)
+* ```admin:org_hook``` - if you want to allow GARM to install webhooks on organizations (optional)
-The resulting token must be configured in the ```[[github]]``` section of the config. Sample as follows:
+Fine grained PATs are also supported as long as you grant the required privileges:
-```toml
-# This is a list of credentials that you can define as part of the repository
-# or organization definitions. They are not saved inside the database, as there
-# is no Vault integration (yet). This will change in the future.
-# Credentials defined here can be listed using the API. Obviously, only the name
-# and descriptions are returned.
-[[github]]
- name = "gabriel"
- description = "github token or user gabriel"
- # This is a personal token with access to the repositories and organizations
- # you plan on adding to garm. The "workflow" option needs to be selected in order
- # to work with repositories, and the admin:org needs to be set if you plan on
- # adding an organization.
- oauth2_token = "super secret token"
- # base_url (optional) is the URL at which your GitHub Enterprise Server can be accessed.
- # If these credentials are for github.com, leave this setting blank
- base_url = "https://ghe.example.com"
- # api_base_url (optional) is the base URL where the GitHub Enterprise Server API can be accessed.
- # Leave this blank if these credentials are for github.com.
- api_base_url = "https://ghe.example.com"
- # upload_base_url (optional) is the base URL where the GitHub Enterprise Server upload API can be accessed.
- # Leave this blank if these credentials are for github.com, or if you don't have a separate URL
- # for the upload API.
- upload_base_url = "https://api.ghe.example.com"
- # ca_cert_bundle (optional) is the CA certificate bundle in PEM format that will be used by the github
- # client to talk to the API. This bundle will also be sent to all runners as bootstrap params.
- # Use this option if you're using a self signed certificate.
- # Leave this blank if you're using github.com or if your certificate is signed by a valid CA.
- ca_cert_bundle = "/etc/garm/ghe.crt"
+* **Repository permissions**:
+ * `Administration: Read & write` - needed to generate JIT config/registration token, remove runners, etc.
+ * `Metadata: Read-only` - automatically enabled by above
+ * `Webhooks: Read & write` - needed to install webhooks on repositories
+* **Organization permissions**:
+ * `Self-hosted runners: Read & write` - needed to manage runners in an organization
+ * `Webhooks: Read & write` - needed to install webhooks on organizations
+
+If you plan to use github apps, you'll need to select the following permissions:
+
+* **Repository permissions**:
+ * ```Administration: Read & write```
+ * ```Metadata: Read-only```
+ * ```Webhooks: Read & write```
+* **Organization permissions**:
+ * ```Self-hosted runners: Read & write```
+ * ```Webhooks: Read & write```
+
+**Note** :warning:: GitHub Apps are not available at the enterprise level.
+
+To add a new GitHub credential, you can use the following command:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel \
+ --description "GitHub PAT for user gabriel" \
+ --auth-type pat \
+ --pat-oauth-token gh_theRestOfThePAT \
+ --endpoint github.com
```
-The double parenthesis means that this is an array. You can specify the ```[[github]]``` section multiple times, with different tokens from different users, or with different access levels. You will then be able to list the available credentials using the API, and reference these credentials when adding repositories or organizations.
+To add a new GitHub App credential, you can use the following command:
-The API will only ever return the name and description to the API consumer.
+```bash
+garm-cli github credentials add \
+ --name gabriel_app \
+ --description "Github App with access to repos" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+ --private-key-path $HOME/yourAppName.2024-03-01.private-key.pem
+```
+
+All sensitive data is encrypted at rest. The API will not return any sensitive info.
+
+## Listing GitHub credentials
+
+To list the available GitHub credentials, you can use the following command:
+
+```bash
+garm-cli github credentials list
+```
+
+## Deleting GitHub credentials
+
+To delete a GitHub credential, you can use the following command:
+
+```bash
+garm-cli github credentials delete
+```
\ No newline at end of file
diff --git a/doc/images/garm-dark.diagram.svg b/doc/images/garm-dark.diagram.svg
new file mode 100644
index 00000000..d48f2616
--- /dev/null
+++ b/doc/images/garm-dark.diagram.svg
@@ -0,0 +1,4 @@
+
+
+
+GitHub/GHES GARM Incus/LXD Providers k8s AWS EC2 Azure LXD/Incus Provider creates compute instance in target infrastructure
Provider creates compute instance in target infrastructure
GCP/openstack/etc Web hook endpoint records/updates job
Web hook endpoint records/updates job Webhook Endpoint Job queue Pool manager consumes jobs in "queued" state
Pool manager consumes jobs in "queued" state Entities (repos/orgs/enterprises)
Entities (repos/orgs/enterprises) Pool Manager Pool leverages provider to create instance
Pool leverages provider to create instance Pool s
(homogeneous set of ephemeral runners)
Pools... Instances fetch their metadata and report installation progress
Instances fetch their metadata and report installation progress garm-runnerN Azure garm-runnerN AWS EC2 garm-runnerN k8s
garm-runnerN Entities (repos/orgs/enterprises)
Entities (repos/orgs/enterprises) Webhook signals new Job Webhooks Pool manager selects appropriate pool
Pool manager selects appropriate pool garm-runner1 garm-runnerN Runner status is updated in the GARM DB
Runner status is updated in the GARM DB Callback URLs (metadata, status updates, etc)
Callback URLs (metadata, status updates, etc) Self-hosted runners garm-runner1 garm-runner3 garm-runner2 garm-runnerN The GitHub runner registers itself in the target entity
The GitHub runner registers itself in the target entity WebSocket (logs/events)
\ No newline at end of file
diff --git a/doc/images/garm-dark.svg b/doc/images/garm-dark.svg
new file mode 100644
index 00000000..f0a0c564
--- /dev/null
+++ b/doc/images/garm-dark.svg
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/images/garm-light.diagram.svg b/doc/images/garm-light.diagram.svg
new file mode 100644
index 00000000..754eafd9
--- /dev/null
+++ b/doc/images/garm-light.diagram.svg
@@ -0,0 +1,4 @@
+
+
+
+GitHub/GHES GARM Incus/LXD Providers k8s AWS EC2 Azure LXD/Incus Provider creates compute instance in target infrastructure
Provider creates compute instance in target infrastructure
GCP/openstack/etc Web hook endpoint records/updates job
Web hook endpoint records/updates job Webhook Endpoint Job queue Pool manager consumes jobs in "queued" state
Pool manager consumes jobs in "queued" state Entities (repos/orgs/enterprises)
Entities (repos/orgs/enterprises) Pool Manager Pool leverages provider to create instance
Pool leverages provider to create instance Pool s
(homogeneous set of ephemeral runners)
Pools... Instances fetch their metadata and report installation progress
Instances fetch their metadata and report installation progress garm-runnerN Azure garm-runnerN AWS EC2 garm-runnerN k8s
garm-runnerN Entities (repos/orgs/enterprises)
Entities (repos/orgs/enterprises) Webhook signals new Job Webhooks Pool manager selects appropriate pool
Pool manager selects appropriate pool garm-runner1 garm-runnerN Runner status is updated in the GARM DB
Runner status is updated in the GARM DB Callback URLs (metadata, status updates, etc)
Callback URLs (metadata, status updates, etc) Self-hosted runners garm-runner1 garm-runner3 garm-runner2 garm-runnerN The GitHub runner registers itself in the target entity
The GitHub runner registers itself in the target entity WebSocket (logs/events)
\ No newline at end of file
diff --git a/doc/images/garm-light.svg b/doc/images/garm-light.svg
new file mode 100644
index 00000000..2495959d
--- /dev/null
+++ b/doc/images/garm-light.svg
@@ -0,0 +1,36 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/images/input_url.png b/doc/images/input_url.png
new file mode 100644
index 00000000..8cc9ce9f
Binary files /dev/null and b/doc/images/input_url.png differ
diff --git a/doc/images/jobs.png b/doc/images/jobs.png
new file mode 100644
index 00000000..8e808259
Binary files /dev/null and b/doc/images/jobs.png differ
diff --git a/doc/images/select_events.png b/doc/images/select_events.png
new file mode 100644
index 00000000..0ec5c7e0
Binary files /dev/null and b/doc/images/select_events.png differ
diff --git a/doc/images/tls_config.png b/doc/images/tls_config.png
new file mode 100644
index 00000000..b06fb18a
Binary files /dev/null and b/doc/images/tls_config.png differ
diff --git a/doc/images/webhooks.png b/doc/images/webhooks.png
new file mode 100644
index 00000000..91210fae
Binary files /dev/null and b/doc/images/webhooks.png differ
diff --git a/doc/labels.md b/doc/labels.md
new file mode 100644
index 00000000..12daf605
--- /dev/null
+++ b/doc/labels.md
@@ -0,0 +1,15 @@
+# Labels and Tags
+
+GitHub runners can be tagged with labels. These labels can be used to restrict the jobs that can run on a runner. For example, you can have a runner with the label `linux` and another with the label `windows`. You can then restrict a job to run only on a runner with the label `linux`.
+
+Whenever a new runner registers itself on GitHub, the runner knows its own labels, as the labels are defined in the pool specification as tags.
+
+Before version 2.305.0 of the runner and before JIT runners were introduced, the runner registration process would append some default labels to the runner. These labels are:
+
+```yaml
+[ 'self-hosted', '$OS_TYPE', '$OS_ARCH' ]
+```
+
+This made scheduling and using runners a bit awkward in some situations. For example, in large organizations with many teams, workflows would often simply target the `self-hosted` label. This would match all runners regardless of any other custom labels. This had the side effect that workflows would potentially use expensive runners for simple jobs or would select low resource runners for tasks that would require a lot of resources.
+
+Version 2.305.0 of the runner introduced the `--no-default-labels` flag when registering the runner. When JIT is not available (GHES version < 3.10), GARM will now register the runner with the `--no-default-labels` flag. If you still need the default labels, you can still add them when creating the pool as part of the `--tags` command line option.
diff --git a/doc/performance_considerations.md b/doc/performance_considerations.md
new file mode 100644
index 00000000..42b81db5
--- /dev/null
+++ b/doc/performance_considerations.md
@@ -0,0 +1,76 @@
+# Performance considerations
+
+Performance is often important when running GitHub action runners with garm. This document shows some ways to improve the creation time of a GitHub action runner.
+
+## GARM specific performance considerations
+
+### Bundle the GitHub action runner
+
+When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time consuming task that quickly adds up when a lot of instances are created by garm throughout the day. Therefore it is recommended to include the GitHub action runner binary inside of the used image.
+
+Example steps for setting a cached runner on a linux image in LXD:
+
+```bash
+# Create a temporary instance from your base image
+lxc launch BASE_IMAGE temp
+
+# Enter bash inside the container
+lxc exec temp -- bash
+
+# Get and install the runner
+mkdir -p /home/runner/actions-runner
+cd /home/runner/actions-runner
+curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
+# Extract the installer
+tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz
+
+# Exit the container
+exit
+
+# Stop the instance and publish it as a new image
+lxc stop temp
+lxc publish temp --alias BASE_IMAGE-2.320.0
+
+# Delete the temporary instance
+lxc delete temp
+
+# Update garm to use the new image
+garm-cli pool update \
+ --image=BASE_IMAGE-2.320.0
+```
+
+You can read more about cached runners in the [Using Cached Runners](https://github.com/cloudbase/garm/blob/main/doc/using_cached_runners.md) documentation.
+
+### Disable updates
+
+By default garm configures the `cloud-init` process of a new instance to update packages on startup. To prevent this from happening (and therefore reduce the time needed to start an instance) garm can be configured accordingly.
+
+Example to disable this on LXD provider:
+
+```bash
+garm-cli pool update \
+ --extra-specs='{"disable_updates": true}'
+```
+
+## LXD specific performance considerations
+
+### Storage driver
+
+LXD supports various [storage drivers](https://linuxcontainers.org/lxd/docs/latest/reference/storage_drivers/) out of the box. These storage drivers support different features which influence the creation time of a new instance. Most notably check if the driver supports `Optimized image storage` and `Optimized instance creation` as these have the biggest impact on instance creation time.
+
+If you're not sure which storage driver is currently used, check your storages with `lxc storage list`.
+
+### Use shiftfs/idmapped mounts
+
+Whenever a new unprivileged instance is started on LXD, its filesystem gets remapped. This is a time consuming task which depends on the image size that's being used. For large images this can easily take over a minute to complete. There are two ways to get around this: `shiftfs` or `idmapped mounts`. While the latter is the preferred one, not all filesystems currently support it, so in most cases enabling `shiftfs` shows a significant performance improvement.
+
+Example on how to enable it on a snap installed LXD:
+
+```bash
+snap set lxd shiftfs.enable=true
+systemctl reload snap.lxd.daemon
+```
+
+Some details and discussions around `shiftfs` can be found [here](https://discuss.linuxcontainers.org/t/trying-out-shiftfs/5155).
+
+Note: When `shiftfs` is used, mounting between host and container might need some extra steps to be secure. See [here](https://discuss.linuxcontainers.org/t/share-folders-and-volumes-between-host-and-containers/7735) for details.
\ No newline at end of file
diff --git a/doc/providers.md b/doc/providers.md
deleted file mode 100644
index d1042130..00000000
--- a/doc/providers.md
+++ /dev/null
@@ -1,139 +0,0 @@
-# Provider configuration
-
-Garm was designed to be extensible. The database layer as well as the providers are defined as interfaces. Currently there are two providers:
-
-* [LXD](https://linuxcontainers.org/lxd/introduction/)
-* External
-
-LXD is the simplest cloud-like system you can easily set up on any GNU/Linux machine, which enables you to create both containers and Virtual Machines. The ```external``` provider is a special type of provider, which delegates functionality to external executables.
-
-## The LXD provider
-
-Garm leverages the virtual machines feature of LXD to create the runners. Here is a sample config section for an LXD provider:
-
-```toml
-# Currently, providers are defined statically in the config. This is due to the fact
-# that we have not yet added support for storing secrets in something like Barbican
-# or Vault. This will change in the future. However, for now, it's important to remember
-# that once you create a pool using one of the providers defined here, the name of that
-# provider must not be changes, or the pool will no longer work. Make sure you remove any
-# pools before removing or changing a provider.
-[[provider]]
- # An arbitrary string describing this provider.
- name = "lxd_local"
- # Provider type. Garm is designed to allow creating providers which are used to spin
- # up compute resources, which in turn will run the github runner software.
- # Currently, LXD is the only supported provider, but more will be written in the future.
- provider_type = "lxd"
- # A short description of this provider. The name, description and provider types will
- # be included in the information returned by the API when listing available providers.
- description = "Local LXD installation"
- [provider.lxd]
- # the path to the unix socket that LXD is listening on. This works if garm and LXD
- # are on the same system, and this option takes precedence over the "url" option,
- # which connects over the network.
- unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
- # When defining a pool for a repository or an organization, you have an option to
- # specify a "flavor". In LXD terms, this translates to "profiles". Profiles allow
- # you to customize your instances (memory, cpu, disks, nics, etc).
- # This option allows you to inject the "default" profile along with the profile selected
- # by the flavor.
- include_default_profile = false
- # enable/disable secure boot. If the image you select for the pool does not have a
- # signed bootloader, set this to false, otherwise your instances won't boot.
- secure_boot = false
- # Project name to use. You can create a separate project in LXD for runners.
- project_name = "default"
- # URL is the address on which LXD listens for connections (ex: https://example.com:8443)
- url = ""
- # garm supports certificate authentication for LXD remote connections. The easiest way
- # to get the needed certificates, is to install the lxc client and add a remote. The
- # client_certificate, client_key and tls_server_certificate can be then fetched from
- # $HOME/snap/lxd/common/config.
- client_certificate = ""
- client_key = ""
- tls_server_certificate = ""
- [provider.lxd.image_remotes]
- # Image remotes are important. These are the default remotes used by lxc. The names
- # of these remotes are important. When specifying an "image" for the pool, that image
- # can be a hash of an existing image on your local LXD installation or it can be a
- # remote image from one of these remotes. You can specify the images as follows:
- # Example:
- #
- # * ubuntu:20.04
- # * ubuntu_daily:20.04
- # * images:centos/8/cloud
- #
- # Ubuntu images come pre-installed with cloud-init which we use to set up the runner
- # automatically and customize the runner. For non Ubuntu images, you need to use the
- # variant that has "/cloud" in the name. Those images come with cloud-init.
- [provider.lxd.image_remotes.ubuntu]
- addr = "https://cloud-images.ubuntu.com/releases"
- public = true
- protocol = "simplestreams"
- skip_verify = false
- [provider.lxd.image_remotes.ubuntu_daily]
- addr = "https://cloud-images.ubuntu.com/daily"
- public = true
- protocol = "simplestreams"
- skip_verify = false
- [provider.lxd.image_remotes.images]
- addr = "https://images.linuxcontainers.org"
- public = true
- protocol = "simplestreams"
- skip_verify = false
-```
-
-You can choose to connect to a local LXD server by using the ```unix_socket_path``` option, or you can connect to a remote LXD cluster/server by using the ```url``` option. If both are specified, the unix socket takes precedence. The config file is fairly well commented, but I will add a note about remotes.
-
-### LXD remotes
-
-By default, garm does not load any image remotes. You get to choose which remotes you add (if any). An image remote is a repository of images that LXD uses to create new instances, either virtual machines or containers. In the absence of any remote, garm will attempt to find the image you configure for a pool of runners, on the LXD server we're connecting to. If one is present, it will be used, otherwise it will fail and you will need to configure a remote.
-
-The sample config file in this repository has the usual default ```LXD``` remotes:
-
-* (ubuntu) - Official Ubuntu images
-* (ubuntu_daily) - Official Ubuntu images, daily build
-* (images) - Community maintained images for various operating systems
-
-When creating a new pool, you'll be able to specify which image you want to use. The images are referenced by ```remote_name:image_tag```. For example, if you want to launch a runner on an Ubuntu 20.04, the image name would be ```ubuntu:20.04```. For a daily image it would be ```ubuntu_daily:20.04```. And for one of the unofficial images it would be ```images:centos/8-Stream/cloud```. Note, for unofficial images you need to use the tags that have ```/cloud``` in the name. These images come pre-installed with ```cloud-init``` which we need to set up the runners automatically.
-
-You can also create your own image remote, where you can host your own custom images. If you want to build your own images, have a look at [distrobuilder](https://github.com/lxc/distrobuilder).
-
-Image remotes in the ```garm``` config, is a map of strings to remote settings. The name of the remote is the last bit of string in the section header. For example, the following section ```[provider.lxd.image_remotes.ubuntu_daily]```, defines the image remote named **ubuntu_daily**. Use this name to reference images inside that remote.
-
-## The External provider
-
-The external provider is a special kind of provider. It delegates the functionality needed to create the runners to external executables. These executables can be either binaries or scripts. As long as they adhere to the needed interface, they can be used to create runners in any target IaaS. This is identical to what ```containerd``` does with ```CNIs```.
-
-There is currently one external provider for [OpenStack](https://www.openstack.org/) available in the [contrib folder of this repository](../contrib/providers.d/openstack). The provider is written in ```bash``` and it is just a sample. A production ready provider would need more error checking and idempotency, but it serves as an example of what can be done. As it stands, it is functional.
-
-The configuration for an external provider is quite simple:
-
-```toml
-# This is an example external provider. External providers are executables that
-# implement the needed interface to create/delete/list compute systems that are used
-# by garm to create runners.
-[[provider]]
-name = "openstack_external"
-description = "external openstack provider"
-provider_type = "external"
- [provider.external]
- # config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
- config_file = "/etc/garm/providers.d/openstack/keystonerc"
- # Absolute path to an executable that implements the provider logic. This executable can be
- # anything (bash, a binary, python, etc). See documentation in this repo on how to write an
- # external provider.
- provider_executable = "/etc/garm/providers.d/openstack/garm-external-provider"
-```
-
-The external provider has three options:
-
-* ```provider_executable```
-* ```config_file```
-
-The ```provider_executable``` option is the absolute path to an executable that implements the provider logic. Garm will delegate all provider operations to this executable. This executable can be anything (bash, python, perl, go, etc). See [Writing an external provider](./external_provider.md) for more details.
-
-The ```config_file``` option is a path on disk to an arbitrary file, that is passed to the external executable via the environment variable ```GARM_PROVIDER_CONFIG_FILE```. This file is only relevant to the external provider. Garm itself does not read it. In the case of the OpenStack provider, this file contains access information for an OpenStack cloud (what you would typically find in a ```keystonerc``` file) as well as some provider specific options like whether or not to boot from volume and which tenant network to use. You can check out the [sample config file](../contrib/providers.d/openstack/keystonerc) in this repository.
-
-If you want to implement an external provider, you can use this file for anything you need to pass into the binary when ```garm``` calls it to execute a particular operation.
diff --git a/doc/quickstart.md b/doc/quickstart.md
new file mode 100644
index 00000000..889f799b
--- /dev/null
+++ b/doc/quickstart.md
@@ -0,0 +1,628 @@
+# Quick start
+
+
+
+ - [Create the config folder](#create-the-config-folder)
+ - [The config file](#the-config-file)
+ - [The provider section](#the-provider-section)
+ - [Starting the service](#starting-the-service)
+ - [Using Docker](#using-docker)
+ - [Setting up GARM as a system service](#setting-up-garm-as-a-system-service)
+ - [Initializing GARM](#initializing-garm)
+ - [Setting up the webhook](#setting-up-the-webhook)
+ - [Creating a GitHub endpoint (Optional)](#creating-a-github-endpoint-optional)
+ - [Adding credentials](#adding-credentials)
+ - [Define a repo](#define-a-repo)
+ - [Create a pool](#create-a-pool)
+
+
+
+## Create the config folder
+
+All of our config files and data will be stored in `/etc/garm`. Let's create that folder:
+
+```bash
+sudo mkdir -p /etc/garm
+```
+
+Coincidentally, this is also where the docker container [looks for the config](/Dockerfile#L29) when it starts up. You can either use `Docker` or you can set up garm directly on your system. We'll walk you through both options. In both cases, we need to first create the config folder and a proper config file.
+
+## The config file
+
+There is a full config file, with detailed comments for each option, in the [testdata folder](/testdata/config.toml). You can use that as a reference. But for the purposes of this guide, we'll be using a minimal config file and add things on as we proceed.
+
+Open `/etc/garm/config.toml` in your favorite editor and paste the following:
+
+```toml
+[default]
+enable_webhook_management = true
+
+[logging]
+# If using nginx, you'll need to configure connection upgrade headers
+# for the /api/v1/ws location. See the sample config in the testdata
+# folder.
+enable_log_streamer = true
+# Set this to "json" if you want to consume these logs in something like
+# Loki or ELK.
+log_format = "text"
+log_level = "info"
+log_source = false
+
+[metrics]
+enable = true
+disable_auth = false
+
+[jwt_auth]
+# Obviously, this needs to be changed :).
+secret = ")9gk_4A6KrXz9D2u`0@MPea*sd6W`%@5MAWpWWJ3P3EqW~qB!!(Vd$FhNc*eU4vG"
+time_to_live = "8760h"
+
+[apiserver]
+ bind = "0.0.0.0"
+ port = 80
+ use_tls = false
+ [apiserver.webui]
+ # Set this to false if you want to disable the Web UI.
+ enable = true
+
+[database]
+ backend = "sqlite3"
+ # This needs to be changed.
+ passphrase = "shreotsinWadquidAitNefayctowUrph"
+ [database.sqlite3]
+ db_file = "/etc/garm/garm.db"
+```
+
+This is a minimal config, with no providers defined. In this example we have the [default](/doc/config.md#the-default-config-section), [logging](/doc/config.md#the-logging-section), [metrics](/doc/config.md#the-metrics-section), [jwt_auth](/doc/config.md#the-jwt-authentication-config-section), [apiserver](/doc/config.md#the-api-server-config-section) and [database](/doc/config.md#database-configuration) sections. Each is documented separately. Feel free to read through the available docs if, for example, you need to enable TLS without using an nginx reverse proxy or if you want to enable the debug server, the log streamer or a log file.
+
+In this sample config we:
+
+* set up logging preferences
+* enable metrics with authentication
+* set a JWT secret which is used to sign JWT tokens
+* set a time to live for the JWT tokens
+* enable the API server on port `80` and bind it to all interfaces
+* set the database backend to `sqlite3` and set a passphrase for sealing secrets (just webhook secrets for now)
+
+At this point, we have a valid config file, but we still need to add the `provider` section.
+
+## The provider section
+
+This is where you have a decision to make. GARM has a number of providers you can leverage. At the time of this writing, we have support for:
+
+* [OpenStack](https://github.com/cloudbase/garm-provider-openstack)
+* [Azure](https://github.com/cloudbase/garm-provider-azure)
+* [Kubernetes](https://github.com/mercedes-benz/garm-provider-k8s) - Thanks to the amazing folks at @mercedes-benz for sharing their awesome provider!
+* [LXD](https://github.com/cloudbase/garm-provider-lxd)
+* [Incus](https://github.com/cloudbase/garm-provider-incus)
+* [Equinix Metal](https://github.com/cloudbase/garm-provider-equinix)
+* [Amazon EC2](https://github.com/cloudbase/garm-provider-aws)
+* [Google Cloud Platform (GCP)](https://github.com/cloudbase/garm-provider-gcp)
+* [Oracle Cloud Infrastructure (OCI)](https://github.com/cloudbase/garm-provider-oci)
+
+The easiest provider to set up is probably the LXD or Incus provider. Incus is a fork of LXD so the functionality is identical (for now). For the purpose of this document, we'll continue with LXD. You don't need an account on an external cloud. You can just use your machine.
+
+You will need to have LXD installed and configured. There is an excellent [getting started guide](https://documentation.ubuntu.com/lxd/en/latest/getting_started/) for LXD. Follow the instructions there to install and configure LXD, then come back here.
+
+Once you have LXD installed and configured, you can add the provider section to your config file. If you're connecting to the `local` LXD installation, the [config snippet for the LXD provider](https://github.com/cloudbase/garm-provider-lxd/blob/4ee4e6fc579da4a292f40e0f7deca1e396e223d0/testdata/garm-provider-lxd.toml) will work out of the box. We'll be connecting using the unix socket so no further configuration will be needed.
+
+Go ahead and create a new config in a location where GARM can access it and paste that entire snippet. For the purposes of this doc, we'll assume you created a new file called `/etc/garm/garm-provider-lxd.toml`. That config file will be used by the provider itself. Remember, the providers are external executables that are called by GARM. They have their own configs which are relevant only to those executables, not GARM itself.
+
+We now need to define the provider in the GARM config file and tell GARM how it can find both the provider binary and the provider specific config file. To do that, open the GARM config file `/etc/garm/config.toml` in your favorite editor and paste the following config snippet at the end:
+
+```toml
+[[provider]]
+ name = "lxd_local"
+ provider_type = "external"
+ description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "/opt/garm/providers.d/garm-provider-lxd"
+ config_file = "/etc/garm/garm-provider-lxd.toml"
+```
+
+This config snippet assumes that the LXD provider executable is available, or is going to be available in `/opt/garm/providers.d/garm-provider-lxd`. If you're using the container image, the executable is already there. If you're installing GARM as a systemd service, don't worry, instructions on how to get the LXD provider executable are coming up.
+
+## Starting the service
+
+You can start GARM using docker or directly on your system. I'll show you both ways.
+
+### Using Docker
+
+If you're using docker, you can start the service with:
+
+```bash
+docker run -d \
+ --name garm \
+ -p 80:80 \
+ -v /etc/garm:/etc/garm:rw \
+ -v /var/snap/lxd/common/lxd/unix.socket:/var/snap/lxd/common/lxd/unix.socket:rw \
+ ghcr.io/cloudbase/garm:v0.1.6
+```
+
+You will notice that we also mounted the LXD unix socket from the host inside the container where the config you pasted expects to find it. If you plan to use an external provider that does not need to connect to LXD over a unix socket, feel free to remove that mount.
+
+Check the logs to make sure everything is working as expected:
+
+```bash
+ubuntu@garm:~$ docker logs garm
+signal.NotifyContext(context.Background, [interrupt terminated])
+2023/07/17 21:55:43 Loading provider lxd_local
+2023/07/17 21:55:43 registering prometheus metrics collectors
+2023/07/17 21:55:43 setting up metric routes
+```
+
+### Setting up GARM as a system service
+
+This process is a bit more involved. We'll need to create a new user for garm and set up permissions for that user to connect to LXD.
+
+First, create the user:
+
+```bash
+useradd --shell /usr/bin/false \
+ --system \
+ --groups lxd \
+ --no-create-home garm
+```
+
+Adding the `garm` user to the LXD group will allow it to connect to the LXD unix socket. We'll need that considering the config we crafted above. The recommendation is to use TCP connections to connect to a remote LXD installation. The local setup of an LXD provider is just for demonstration purposes/testing.
+
+Next, download the latest release from the [releases page](https://github.com/cloudbase/garm/releases).
+
+```bash
+wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.6/garm-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
+```
+
+We'll be running under an unprivileged user. If we want to be able to listen on any port under `1024`, we'll have to set some capabilities on the binary:
+
+```bash
+setcap cap_net_bind_service=+ep /usr/local/bin/garm
+```
+
+Create a folder for the external providers:
+
+```bash
+sudo mkdir -p /opt/garm/providers.d
+```
+
+Download the LXD provider binary:
+
+```bash
+git clone https://github.com/cloudbase/garm-provider-lxd
+cd garm-provider-lxd
+go build -o /opt/garm/providers.d/garm-provider-lxd
+```
+
+Change the permissions on the config dir:
+
+```bash
+chown -R garm:garm /etc/garm
+```
+
+Copy the sample `systemd` service file:
+
+```bash
+wget -O /etc/systemd/system/garm.service \
+ https://raw.githubusercontent.com/cloudbase/garm/v0.1.6/contrib/garm.service
+```
+
+Reload the `systemd` daemon and start the service:
+
+```bash
+systemctl daemon-reload
+systemctl start garm
+```
+
+Check the logs to make sure everything is working as expected:
+
+```bash
+ubuntu@garm:~$ sudo journalctl -u garm
+```
+
+The logs should show output similar to the following:
+
+```bash
+ubuntu@garm:~$ sudo journalctl -u garm --no-pager
+signal.NotifyContext(context.Background, [interrupt terminated])
+2023/07/17 22:21:33 Loading provider lxd_local
+2023/07/17 22:21:33 registering prometheus metrics collectors
+2023/07/17 22:21:33 setting up metric routes
+```
+
+Excellent! We have a working GARM installation. Now we need to initialize the controller and set up the webhook in GitHub.
+
+## Initializing GARM
+
+Before we can start using GARM, we need to initialize it. This will create the `admin` user and generate a unique controller ID that will identify this GARM installation. This process allows us to use multiple GARM installations with the same GitHub account, if we want or need to. GARM will use the controller ID to identify the runners it creates. This way we won't run the risk of accidentally removing runners we don't manage.
+
+To initialize GARM, we'll use the `garm-cli` tool. You can download the latest release from the [releases page](https://github.com/cloudbase/garm/releases):
+
+```bash
+wget -q -O - https://github.com/cloudbase/garm/releases/download/v0.1.6/garm-cli-linux-amd64.tgz | tar xzf - -C /usr/local/bin/
+```
+
+Now we can initialize GARM:
+
+```bash
+ubuntu@garm:~$ garm-cli init --name="local_garm" --url http://garm.example.com
+Username: admin
+Email: admin@garm.example.com
+✔ Password: ************█
+✔ Confirm password: ************█
+Congrats! Your controller is now initialized.
+
+Following are the details of the admin user and details about the controller.
+
+Admin user information:
+
++----------+--------------------------------------+
+| FIELD | VALUE |
++----------+--------------------------------------+
+| ID | 6b0d8f67-4306-4702-80b6-eb0e2e4ee695 |
+| Username | admin |
+| Email | admin@garm.example.com |
+| Enabled | true |
++----------+--------------------------------------+
+
+Controller information:
+
++------------------------+-----------------------------------------------------------------------+
+| FIELD | VALUE |
++------------------------+-----------------------------------------------------------------------+
+| Controller ID | 0c54fd66-b78b-450a-b41a-65af2fd0f71b |
+| Metadata URL | http://garm.example.com/api/v1/metadata |
+| Callback URL | http://garm.example.com/api/v1/callbacks |
+| Webhook Base URL | http://garm.example.com/webhooks |
+| Controller Webhook URL | http://garm.example.com/webhooks/0c54fd66-b78b-450a-b41a-65af2fd0f71b |
++------------------------+-----------------------------------------------------------------------+
+
+Make sure that the URLs in the table above are reachable by the relevant parties.
+
+The metadata and callback URLs *must* be accessible by the runners that GARM spins up.
+The base webhook and the controller webhook URLs must be accessible by GitHub or GHES.
+```
+
+Every time you init a new GARM instance, a new profile will be created in your local `garm-cli` config. You can also log into an already initialized instance using:
+
+```bash
+garm-cli profile add \
+ --name="another_garm" \
+ --url https://garm2.example.com
+```
+
+Then you can switch between profiles using:
+
+```bash
+garm-cli profile switch another_garm
+```
+
+## Setting up the webhook
+
+There are two options when it comes to setting up the webhook in GitHub. You can manually set up the webhook in the GitHub UI, and then use the resulting secret when creating the entity (repo, org, enterprise), or you can let GARM do it automatically if the app or PAT you're using has the [required privileges](/doc/github_credentials.md).
+
+If you want to manually set up the webhooks, have a look at the [webhooks doc](/doc/webhooks.md) for more information.
+
+In this guide, I'll show you how to do it automatically when adding a new repo, assuming you have the required privileges. Note, you'll still have to manually set up webhooks if you want to use GARM at the enterprise level. Automatic webhook management is only available for repos and orgs.
+
+## Creating a GitHub endpoint (Optional)
+
+This section is only of interest if you're using a GitHub Enterprise Server (GHES) deployment. If you're using [github.com](https://github.com), you can skip this section.
+
+Let's list existing endpoints:
+
+```bash
+gabriel@rossak:~$ garm-cli github endpoint list
++------------+--------------------+-------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------+-------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------+-------------------------+
+```
+
+By default, GARM creates a default `github.com` endpoint. This endpoint cannot be updated or deleted. If you want to add a new endpoint, you can do so using the `github endpoint create` command:
+
+```bash
+garm-cli github endpoint create \
+ --name example \
+ --description "Just an example ghes endpoint" \
+ --base-url https://ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem
+```
+
+In this example, we add a new GitHub endpoint called `example`. The `ca-cert-path` is optional and is used to verify the server's certificate. If you don't provide a path, GARM will use the system's default CA certificates.
+
+## Adding credentials
+
+Before we can add a new entity, we need github credentials to interact with that entity (manipulate runners, create webhooks, etc). Credentials are tied to a specific github endpoint. In this section we'll be adding credentials that are valid for either [github.com](https://github.com) or your own GHES server (if you added one in the previous section).
+
+When creating a new entity (repo, org, enterprise) using the credentials you define here, GARM will automatically associate that entity with the github endpoint that the credentials use.
+
+If you want to swap the credentials for an entity, the new credentials will need to be associated with the same endpoint as the old credentials.
+
+Let's add some credentials:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel \
+ --description "GitHub PAT for user gabriel" \
+ --auth-type pat \
+ --pat-oauth-token gh_theRestOfThePAT \
+ --endpoint github.com
+```
+
+You can also add a GitHub App as credentials. The process is similar, but you'll need to provide the `app_id`, `private_key_path` and `installation_id`:
+
+```bash
+garm-cli github credentials add \
+ --name gabriel_app \
+ --description "Github App with access to repos" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+ --private-key-path $HOME/yourAppName.2024-03-01.private-key.pem
+```
+
+All sensitive info is encrypted at rest. Also, the API will not return sensitive data.
+
+## Define a repo
+
+We now have a working GARM installation, with github credentials and a provider added. It's time to add a repo.
+
+Before we add a repo, let's list credentials. We'll need their names when we add a new repo.
+
+```bash
+ubuntu@garm:~$ garm-cli github credentials list
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| ID | NAME | DESCRIPTION | BASE URL | API URL | UPLOAD URL | TYPE |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 1 | gabriel | GitHub PAT for user gabriel | https://github.com | https://api.github.com/ | https://uploads.github.com/ | pat |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 2 | gabriel_app | Github App with access to repos | https://github.com | https://api.github.com/ | https://uploads.github.com/ | app |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+```
+
+Now we can add a repo:
+
+```bash
+garm-cli repo add \
+ --owner gsamfira \
+ --name scripts \
+ --credentials gabriel \
+ --random-webhook-secret \
+ --install-webhook \
+ --pool-balancer-type roundrobin
+```
+
+This will add a new repo called `scripts` under the `gsamfira` org. We also tell GARM to generate a random secret and install a webhook using that random secret. If you want to use a specific secret, you can use the `--webhook-secret` option, but in that case, you'll have to manually set up the webhook in GitHub.
+
+The `--pool-balancer-type` option is used to set the pool balancer type. That dictates how GARM chooses the pool in which to create a new runner when consuming recorded queued jobs. If `roundrobin` (default) is used, GARM will cycle through all pools and create a runner in the first pool that has available resources. If `pack` is used, GARM will try to fill up a pool before moving to the next one. The order of the pools is determined by the pool priority. We'll see more about pools in the next section.
+
+You should see something like this:
+
+```bash
+gabriel@rossak:~$ garm-cli repo add \
+ --name scripts \
+ --credentials gabriel_app \
+ --install-webhook \
+ --random-webhook-secret \
+ --owner gsamfira \
+ --pool-balancer-type roundrobin
++----------------------+--------------------------------------+
+| FIELD | VALUE |
++----------------------+--------------------------------------+
+| ID | 0c91d9fd-2417-45d4-883c-05daeeaa8272 |
+| Owner | gsamfira |
+| Name | scripts |
+| Pool balancer type | roundrobin |
+| Credentials | gabriel_app |
+| Pool manager running | true |
++----------------------+--------------------------------------+
+```
+
+We can now list the repos:
+
+```bash
+gabriel@rock:~$ garm-cli repo ls
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
+| 0c91d9fd-2417-45d4-883c-05daeeaa8272 | gsamfira | scripts | gabriel | roundrobin | true |
++--------------------------------------+----------+--------------+------------------+--------------------+------------------+
+```
+
+Excellent! Make a note of the ID. We'll need it later when we create a pool.
+
+## Create a pool
+
+This is the last step. You're almost there!
+
+To create a pool we'll need the repo ID from the previous step (which we have) and a provider in which the pool will spin up new runners. We'll use the LXD provider we defined earlier, but we need its name:
+
+```bash
+gabriel@rossak:~$ garm-cli provider list
++-----------+------------------------+-----------+
+| NAME | DESCRIPTION | TYPE |
++-----------+------------------------+-----------+
+| lxd_local | Local LXD installation | external |
++-----------+------------------------+-----------+
+```
+
+Now we can create a pool:
+
+```bash
+garm-cli pool add \
+ --repo 0c91d9fd-2417-45d4-883c-05daeeaa8272 \
+ --enabled true \
+ --provider-name lxd_local \
+ --flavor default \
+ --image ubuntu:22.04 \
+ --max-runners 5 \
+ --min-idle-runners 0 \
+ --os-arch amd64 \
+ --os-type linux \
+ --tags ubuntu,generic
+```
+
+You should see something like this:
+
+```bash
+gabriel@rossak:~$ garm-cli pool add \
+> --repo 0c91d9fd-2417-45d4-883c-05daeeaa8272 \
+> --enabled true \
+> --provider-name lxd_local \
+> --flavor default \
+> --image ubuntu:22.04 \
+> --max-runners 5 \
+> --min-idle-runners 0 \
+> --os-arch amd64 \
+> --os-type linux \
+> --tags ubuntu,generic
++--------------------------+--------------------------------------------+
+| FIELD | VALUE |
++--------------------------+--------------------------------------------+
+| ID | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
+| Provider Name | lxd_local |
+| Priority | 0 |
+| Image | ubuntu:22.04 |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 0 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, generic |
+| Belongs to | gsamfira/scripts |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+--------------------------------------------+
+```
+
+If we list the pool we should see it:
+
+```bash
+gabriel@rock:~$ garm-cli pool ls
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX | PRIORITY |
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
+| 344e4a72-2035-4a18-a3d5-87bd3874b56c | ubuntu:22.04 | default | ubuntu generic | gsamfira/scripts | repo | true | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------+------------------+-------+---------+---------------+----------+
+```
+
+This pool is enabled, but the `min-idle-runners` option is set to 0. This means that it will not create any idle runners. It will only create runners when a job is started and a webhook is sent to our GARM server. Optionally, you can set `min-idle-runners` to a value greater than 0, but keep in mind that depending on the provider you use, this may incur cost.
+
+For the purposes of this guide, we'll increase it to 1 so we have a runner created.
+
+First, list current runners:
+
+```bash
+gabriel@rossak:~$ garm-cli runner ls
++----+------+--------+---------------+---------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+------+--------+---------------+---------+
++----+------+--------+---------------+---------+
+```
+
+No runners. Now, let's update the pool and set `min-idle-runners` to 1:
+
+```bash
+gabriel@rossak:~$ garm-cli pool update 344e4a72-2035-4a18-a3d5-87bd3874b56c --min-idle-runners=1
++--------------------------+--------------------------------------------+
+| FIELD | VALUE |
++--------------------------+--------------------------------------------+
+| ID | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
+| Provider Name | lxd_local |
+| Priority | 0 |
+| Image | ubuntu:22.04 |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, generic |
+| Belongs to | gsamfira/scripts |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+--------------------------------------------+
+```
+
+Now if we list the runners:
+
+```bash
+gabriel@rossak:~$ garm-cli runner ls
++----+-------------------+----------------+---------------+--------------------------------------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+-------------------+----------------+---------------+--------------------------------------+
+| 1 | garm-tdtD6zpsXhj1 | pending_create | pending | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
++----+-------------------+----------------+---------------+--------------------------------------+
+```
+
+If we check our LXD, we should see it there as well:
+
+```bash
+gabriel@rossak:~$ lxc list
++-------------------+---------+---------------------+------+-----------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++-------------------+---------+---------------------+------+-----------+-----------+
+| garm-tdtD6zpsXhj1 | RUNNING | 10.44.30.155 (eth0) | | CONTAINER | 0 |
++-------------------+---------+---------------------+------+-----------+-----------+
+```
+
+If we wait for a bit and run:
+
+```bash
+gabriel@rossak:~$ garm-cli runner show garm-tdtD6zpsXhj1
++-----------------+------------------------------------------------------------------------------------------------------+
+| FIELD | VALUE |
++-----------------+------------------------------------------------------------------------------------------------------+
+| ID | 7ac024c9-1854-4911-9859-d061059244a6 |
+| Provider ID | garm-tdtD6zpsXhj1 |
+| Name | garm-tdtD6zpsXhj1 |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| OS Name | ubuntu |
+| OS Version | jammy |
+| Status | running |
+| Runner Status | idle |
+| Pool ID | 344e4a72-2035-4a18-a3d5-87bd3874b56c |
+| Addresses | 10.44.30.155 |
+| Status Updates | 2023-07-18T14:32:26: runner registration token was retrieved |
+| | 2023-07-18T14:32:26: downloading tools from https://github.com/actions/runner/releases/download/v2.3 |
+| | 06.0/actions-runner-linux-amd64-2.306.0.tar.gz |
+| | 2023-07-18T14:32:30: extracting runner |
+| | 2023-07-18T14:32:36: installing dependencies |
+| | 2023-07-18T14:33:03: configuring runner |
+| | 2023-07-18T14:33:14: runner successfully configured after 1 attempt(s) |
+| | 2023-07-18T14:33:14: installing runner service |
+| | 2023-07-18T14:33:15: starting service |
+| | 2023-07-18T14:33:16: runner successfully installed |
++-----------------+------------------------------------------------------------------------------------------------------+
+```
+
+We can see the runner getting installed and phoning home with status updates. You should now see it in your GitHub repo under `Settings --> Actions --> Runners`.
+
+You can also target this runner using one or more of its labels. In this case, we can target it using `ubuntu` or `generic`.
+
+You can also view jobs sent to your garm instance using the `garm-cli job ls` command:
+
+```bash
+gabriel@rossak:~$ garm-cli job ls
++----+------+--------+------------+-------------+------------+------------------+-----------+
+| ID | NAME | STATUS | CONCLUSION | RUNNER NAME | REPOSITORY | REQUESTED LABELS | LOCKED BY |
++----+------+--------+------------+-------------+------------+------------------+-----------+
++----+------+--------+------------+-------------+------------+------------------+-----------+
+```
+
+There are no jobs sent yet to my GARM install, but once you start sending jobs, you'll see them here as well.
+
+That's it! Now you have a working GARM installation. You can add more repos, orgs or enterprises and create more pools. You can also add more providers for different clouds and credentials with access to different GitHub resources.
+
+Check out the [Using GARM](/doc/using_garm.md) guide for more details on how to use GARM.
diff --git a/doc/running_garm.md b/doc/running_garm.md
deleted file mode 100644
index 4ac623b8..00000000
--- a/doc/running_garm.md
+++ /dev/null
@@ -1,391 +0,0 @@
-# Running garm
-
-Create a folder for the config:
-
- ```bash
- mkdir $HOME/garm
- ```
-
-Create a config file for ```garm```:
-
- ```bash
- cp ./testdata/config.toml $HOME/garm/config.toml
- ```
-
-Customize the config whichever way you want, then run ```garm```:
-
- ```bash
- garm -config $HOME/garm/config.toml
- ```
-
-This will start the API and migrate the database. Note, if you're using MySQL, you will need to create a database, grant access to a user and configure those credentials in the ```config.toml``` file.
-
-## First run
-
-Before you can use ```garm```, you need to initialize it. This means we need to create an admin user, and login:
-
- ```bash
- ubuntu@experiments:~$ garm-cli init --name="local_garm" --url https://garm.example.com
- Username: admin
- Email: root@localhost
- ✔ Password: *************█
- +----------+--------------------------------------+
- | FIELD | VALUE |
- +----------+--------------------------------------+
- | ID | ef4ab6fd-1252-4d5a-ba5a-8e8bd01610ae |
- | Username | admin |
- | Email | root@localhost |
- | Enabled | true |
- +----------+--------------------------------------+
- ```
-
-Alternatively you can run this in non-interactive mode. See ```garm-cli init -h``` for details.
-
-## Enabling bash completion
-
-Before we begin, let's make our lives a little easier and set up bash completion. The wonderful [cobra](https://github.com/spf13/cobra) library gives us completion for free:
-
- ```bash
- mkdir $HOME/.bash_completion.d
- echo 'source $HOME/.bash_completion.d/* >/dev/null 2>&1|| true' >> $HOME/.bash_completion
- ```
-
-Now generate the completion file:
-
- ```bash
- garm-cli completion bash > $HOME/.bash_completion.d/garm
- ```
-
-Completion for multiple shells is available:
-
- ```bash
- ubuntu@experiments:~$ garm-cli completion
- Generate the autocompletion script for garm-cli for the specified shell.
- See each sub-command's help for details on how to use the generated script.
-
- Usage:
- garm-cli completion [command]
-
- Available Commands:
- bash Generate the autocompletion script for bash
- fish Generate the autocompletion script for fish
- powershell Generate the autocompletion script for powershell
- zsh Generate the autocompletion script for zsh
-
- Flags:
- -h, --help help for completion
-
- Global Flags:
- --debug Enable debug on all API calls
-
- Use "garm-cli completion [command] --help" for more information about a command.
- ```
-
-## Adding a repository/organization/enterprise
-
-To add a repository, we need credentials. Let's list the available credentials currently configured. These credentials are added to ```garm``` using the config file (see above), but we need to reference them by name when creating a repo.
-
- ```bash
- ubuntu@experiments:~$ garm-cli credentials list
- +---------+------------------------------+
- | NAME | DESCRIPTION |
- +---------+------------------------------+
- | gabriel | github token or user gabriel |
- +---------+------------------------------+
- ```
-
-Now we can add a repository to ```garm```:
-
- ```bash
- ubuntu@experiments:~$ garm-cli repository create \
- --credentials=gabriel \
- --owner=gabriel-samfira \
- --name=scripts \
- --webhook-secret="super secret webhook secret you configured in github webhooks"
- +-------------+--------------------------------------+
- | FIELD | VALUE |
- +-------------+--------------------------------------+
- | ID | 77258e1b-81d2-4821-bdd7-f6923a026455 |
- | Owner | gabriel-samfira |
- | Name | scripts |
- | Credentials | gabriel |
- +-------------+--------------------------------------+
- ```
-
-To add an organization, use the following command:
-
- ```bash
- ubuntu@experiments:~$ garm-cli organization create \
- --credentials=gabriel \
- --name=gsamfira \
- --webhook-secret="$SECRET"
- +-------------+--------------------------------------+
- | FIELD | VALUE |
- +-------------+--------------------------------------+
- | ID | 7f0b83d5-3dc0-42de-b189-f9bbf1ae8901 |
- | Name | gsamfira |
- | Credentials | gabriel |
- +-------------+--------------------------------------+
- ```
-
-To add an enterprise, use the following command:
-
- ```bash
- ubuntu@experiments:~$ garm-cli enterprise create \
- --credentials=gabriel \
- --name=gsamfira \
- --webhook-secret="$SECRET"
- +-------------+--------------------------------------+
- | FIELD | VALUE |
- +-------------+--------------------------------------+
- | ID | 0925033b-049f-4334-a460-c26f979d2356 |
- | Name | gsamfira |
- | Credentials | gabriel |
- +-------------+--------------------------------------+
- ```
-
-## Creating a pool
-
-Pools are objects that define one type of worker and rules by which that pool of workers will be maintained. You can have multiple pools of different types of instances. Each pool can have different images, be on different providers and have different tags.
-
-Before we can create a pool, we need to list the available providers. Providers are defined in the config (see above), but we need to reference them by name in the pool.
-
- ```bash
- ubuntu@experiments:~$ garm-cli provider list
- +-----------+------------------------+------+
- | NAME | DESCRIPTION | TYPE |
- +-----------+------------------------+------+
- | lxd_local | Local LXD installation | lxd |
- +-----------+------------------------+------+
- ```
-
-Now we can create a pool for repo ```gabriel-samfira/scripts```:
-
- ```bash
- ubuntu@experiments:~$ garm-cli pool add \
- --repo=77258e1b-81d2-4821-bdd7-f6923a026455 \
- --flavor="default" \
- --image="ubuntu:20.04" \
- --provider-name="lxd_local" \
- --tags="ubuntu,simple-runner,repo-runner" \
- --enabled=false
- +------------------+-------------------------------------------------------------+
- | FIELD | VALUE |
- +------------------+-------------------------------------------------------------+
- | ID | fb25f308-7ad2-4769-988e-6ec2935f642a |
- | Provider Name | lxd_local |
- | Image | ubuntu:20.04 |
- | Flavor | default |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | Max Runners | 5 |
- | Min Idle Runners | 1 |
- | Tags | ubuntu, simple-runner, repo-runner, self-hosted, x64, linux |
- | Belongs to | gabriel-samfira/scripts |
- | Level | repo |
- | Enabled | false |
- +------------------+-------------------------------------------------------------+
- ```
-
-There are a bunch of things going on here, so let's break it down. We created a pool for repo ```gabriel-samfira/scripts``` (identified by the ID ```77258e1b-81d2-4821-bdd7-f6923a026455```). This pool has the following characteristics:
-
-* flavor=default - The **flavor** describes the hardware aspects of an instance. In LXD terms, this translates to [profiles](https://linuxcontainers.org/lxd/docs/master/profiles/). In LXD, profiles describe how much memory, CPU, NICs and disks a particular instance will get. Much like the flavors in OpenStack or any public cloud provider
-* image=ubuntu:20.04 - The image describes the operating system that will be spun up on the provider. LXD fetches these images from one of the configured remotes, or from the locally cached images. On AWS, this would be an AMI (for example).
-* provider-name=lxd_local - This is the provider on which we'll be spinning up runners. You can have as many providers defined as you wish, and you can reference either one of them when creating a pool.
-* tags="ubuntu,simple-runner,repo-runner" - This list of tags will be added to all runners maintained by this pool. These are the tags you can use to target these runners in your workflows. By default, the github runner will automatically add a few default tags (self-hosted, x64, linux in the above example)
-* enabled=false - This option creates the pool in **disabled** state. When disabled, no new runners will be spun up.
-
-By default, a pool is created with a max worker count of ```5``` and a minimum idle runner count of ```1```. This means that this pool will create by default one runner, and will automatically add more, as jobs are triggered on github. The idea is to have at least one runner ready to accept a workflow job. The pool will keep adding workers until the max runner count is reached. Once a workflow job is complete, the runner is automatically deleted, and replaced.
-
-To update the pool, we cam use the following command:
-
- ```bash
- ubuntu@experiments:~$ garm-cli pool update fb25f308-7ad2-4769-988e-6ec2935f642a --enabled=true
- +------------------+-------------------------------------------------------------+
- | FIELD | VALUE |
- +------------------+-------------------------------------------------------------+
- | ID | fb25f308-7ad2-4769-988e-6ec2935f642a |
- | Provider Name | lxd_local |
- | Image | ubuntu:20.04 |
- | Flavor | default |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | Max Runners | 5 |
- | Min Idle Runners | 1 |
- | Tags | ubuntu, simple-runner, repo-runner, self-hosted, x64, linux |
- | Belongs to | gabriel-samfira/scripts |
- | Level | repo |
- | Enabled | true |
- +------------------+-------------------------------------------------------------+
- ```
-
-Now, if we list the runners, we should see one being created:
-
- ```bash
- ubuntu@experiments:~$ garm-cli runner ls fb25f308-7ad2-4769-988e-6ec2935f642a
- +-------------------------------------------+----------------+---------------+--------------------------------------+
- | NAME | STATUS | RUNNER STATUS | POOL ID |
- +-------------------------------------------+----------------+---------------+--------------------------------------+
- | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe | pending_create | pending | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-------------------------------------------+----------------+---------------+--------------------------------------+
- ```
-
-We can also do a show on that runner to get more info:
-
- ```bash
- ubuntu@experiments:~$ garm-cli runner show garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe
- +-----------------+-------------------------------------------+
- | FIELD | VALUE |
- +-----------------+-------------------------------------------+
- | ID | 089d63c9-5567-4318-a3a6-e065685c975b |
- | Provider ID | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe |
- | Name | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | OS Name | ubuntu |
- | OS Version | focal |
- | Status | running |
- | Runner Status | pending |
- | Pool ID | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-----------------+-------------------------------------------+
- ```
-
-If we check out LXD, we can see the instance was created and is currently being bootstrapped:
-
- ```bash
- ubuntu@experiments:~$ lxc list
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe | RUNNING | 10.247.246.219 (enp5s0) | | VIRTUAL-MACHINE | 0 |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- ```
-
-It might take a couple of minutes for the runner to come online, as the instance will do a full upgrade, then download the runner and install it. But once the installation is done you should see something like this:
-
- ```bash
- ubuntu@experiments:~$ garm-cli runner show garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- | FIELD | VALUE |
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- | ID | 089d63c9-5567-4318-a3a6-e065685c975b |
- | Provider ID | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe |
- | Name | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | OS Name | ubuntu |
- | OS Version | focal |
- | Status | running |
- | Runner Status | idle |
- | Pool ID | fb25f308-7ad2-4769-988e-6ec2935f642a |
- | Status Updates | 2022-05-06T13:21:54: downloading tools from https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-x64-2.291.1.tar.gz |
- | | 2022-05-06T13:21:56: extracting runner |
- | | 2022-05-06T13:21:58: installing dependencies |
- | | 2022-05-06T13:22:07: configuring runner |
- | | 2022-05-06T13:22:12: installing runner service |
- | | 2022-05-06T13:22:12: starting service |
- | | 2022-05-06T13:22:13: runner successfully installed |
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- ```
-
-If we list the runners for this pool, we should see one runner with a ```RUNNER STATUS``` of ```idle```:
-
- ```bash
- ubuntu@experiments:~$ garm-cli runner ls fb25f308-7ad2-4769-988e-6ec2935f642a
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | NAME | STATUS | RUNNER STATUS | POOL ID |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe | running | idle | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- ```
-
-## Updating a pool
-
-Let's update the pool and request that it maintain a number of minimum idle runners equal to 3:
-
- ```bash
- ubuntu@experiments:~$ garm-cli pool update fb25f308-7ad2-4769-988e-6ec2935f642a \
- --min-idle-runners=3 \
- --max-runners=10
- +------------------+----------------------------------------------------------------------------------+
- | FIELD | VALUE |
- +------------------+----------------------------------------------------------------------------------+
- | ID | fb25f308-7ad2-4769-988e-6ec2935f642a |
- | Provider Name | lxd_local |
- | Image | ubuntu:20.04 |
- | Flavor | default |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | Max Runners | 10 |
- | Min Idle Runners | 3 |
- | Tags | ubuntu, simple-runner, repo-runner, self-hosted, x64, linux |
- | Belongs to | gabriel-samfira/scripts |
- | Level | repo |
- | Enabled | true |
- | Instances | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe (089d63c9-5567-4318-a3a6-e065685c975b) |
- +------------------+----------------------------------------------------------------------------------+
- ```
-
-Now if we list runners we should see 2 more in ```pending``` state:
-
- ```bash
- ubuntu@experiments:~$ garm-cli runner ls fb25f308-7ad2-4769-988e-6ec2935f642a
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | NAME | STATUS | RUNNER STATUS | POOL ID |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe | running | idle | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | garm-bc180c6c-6e31-4c7b-8ce1-da0ffd76e247 | running | pending | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- | garm-37c5daf4-18c5-47fc-95de-8c1656889093 | running | pending | fb25f308-7ad2-4769-988e-6ec2935f642a |
- +-------------------------------------------+---------+---------------+--------------------------------------+
- ```
-
-We can see them in LXC as well:
-
- ```bash
- ubuntu@experiments:~$ lxc list
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | garm-37c5daf4-18c5-47fc-95de-8c1656889093 | RUNNING | | | VIRTUAL-MACHINE | 0 |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | garm-bc180c6c-6e31-4c7b-8ce1-da0ffd76e247 | RUNNING | | | VIRTUAL-MACHINE | 0 |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- | garm-edeb8f46-ab09-4ed9-88fc-2731ecf9aabe | RUNNING | 10.247.246.219 (enp5s0) | | VIRTUAL-MACHINE | 0 |
- +-------------------------------------------+---------+-------------------------+------+-----------------+-----------+
- ```
-
-Once they transition to ```idle```, you should see them in your repo settings, under ```Actions --> Runners```.
-
-The procedure is identical for organizations. Have a look at the garm-cli help:
-
- ```bash
- ubuntu@experiments:~$ garm-cli -h
- CLI for the github self hosted runners manager.
-
- Usage:
- garm-cli [command]
-
- Available Commands:
- completion Generate the autocompletion script for the specified shell
- credentials List configured credentials
- debug-log Stream garm log
- enterprise Manage enterprise
- help Help about any command
- init Initialize a newly installed garm
- organization Manage organizations
- pool List pools
- profile Add, delete or update profiles
- provider Interacts with the providers API resource.
- repository Manage repositories
- runner List runners in a pool
- version Print version and exit
-
- Flags:
- --debug Enable debug on all API calls
- -h, --help help for garm-cli
-
- Use "garm-cli [command] --help" for more information about a command.
-
- ```
diff --git a/doc/scalesets.md b/doc/scalesets.md
new file mode 100644
index 00000000..de9d348e
--- /dev/null
+++ b/doc/scalesets.md
@@ -0,0 +1,93 @@
+# Scale Sets
+
+
+
+- [Scale Sets](#scale-sets)
+ - [Create a new scale set](#create-a-new-scale-set)
+ - [Scale Set vs Pool](#scale-set-vs-pool)
+
+
+
+GARM supports [scale sets](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller). This new mode of operation was added by GitHub to enable more efficient scheduling of runners using their own ARC (Actions Runner Controller) project. The APIs for enabling scale sets are not yet public and the scale set functionality itself is not terribly well documented outside the context of ARC, but it can be implemented in third party auto scalers.
+
+In this document we will focus on how scale sets work, how they are different than pools and how to manage them.
+
+We'll start with detailing how to create a scale set.
+
+## Create a new scale set
+
+Creating a scale set is identical to [creating a pool](/doc/using_garm.md#creating-a-runner-pool), but instead of adding labels to a scale set, it takes a name. We'll assume you already have a provider enabled and you have added a repo, org or enterprise to GARM.
+
+```bash
+ubuntu@garm:~$ garm-cli repo ls
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | ENDPOINT | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+| 84a5e82f-7ab1-427f-8ee0-4569b922296c | gsamfira | garm-testing | github.com | gabriel-samfira | roundrobin | true |
++--------------------------------------+-----------+--------------+------------+------------------+--------------------+------------------+
+```
+
+List providers:
+
+```bash
+ubuntu@garm:~$ garm-cli provider list
++--------------+---------------------------------+----------+
+| NAME | DESCRIPTION | TYPE |
++--------------+---------------------------------+----------+
+| incus | Incus external provider | external |
++--------------+---------------------------------+----------+
+| azure | azure provider | external |
++--------------+---------------------------------+----------+
+| aws_ec2 | Amazon EC2 provider | external |
++--------------+---------------------------------+----------+
+```
+
+Create a new scale set:
+
+```bash
+garm-cli scaleset add \
+ --repo 84a5e82f-7ab1-427f-8ee0-4569b922296c \
+ --provider-name incus \
+ --image ubuntu:22.04 \
+ --name garm-scale-set \
+ --flavor default \
+ --enabled true \
+ --min-idle-runners=0 \
+ --max-runners=20
++--------------------------+-----------------------+
+| FIELD | VALUE |
++--------------------------+-----------------------+
+| ID | 8 |
+| Scale Set ID | 14 |
+| Scale Name | garm-scale-set |
+| Provider Name | incus |
+| Image | ubuntu:22.04 |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 20 |
+| Min Idle Runners | 0 |
+| Runner Bootstrap Timeout | 20 |
+| Belongs to | gsamfira/garm-testing |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | Default |
++--------------------------+-----------------------+
+```
+
+That's it. You now have a scale set created, ready to accept jobs.
+
+## Scale Set vs Pool
+
+Scale sets are a new way of managing runners. They were introduced by GitHub to enable more efficient scheduling of runners. Scale sets are meant to reduce API calls, improve reliability of message deliveries to the auto scaler and improve efficiency of runner scheduling. While webhooks work great most of the time, under heavy load, they may not fire or they may fire while the auto scaler is offline, leading to lost messages. If webhooks are fired while GARM is down, we will never know about those jobs unless we query the current workflow runs.
+
+Listing workflow runs is not feasible for orgs or enterprises, as that would mean listing all repos within an org then for each repository, listing all workflow runs. This gets worse for enterprises. Scale sets on the other hand allow GARM to subscribe to a message queue and get messages just for that scale set over HTTP long poll.
+
+Advantages of scale sets over pools:
+
+* No more need to install a webhook, reducing your security footprint.
+* Scheduling is done by GitHub. GARM receives runner requests from GitHub and GARM can choose to acquire those jobs or leave them for some other scaler.
+* Easier use of runner groups. While GARM supports runner groups, GitHub currently [does not send the group name](https://github.com/orgs/community/discussions/158000) as part of webhooks in `queued` state. This prevents GARM (or any other auto scaler) from efficiently scheduling runners to pools that have runner groups set. But given that in the case of scale sets, GitHub schedules the runners to the scale set itself, we can efficiently create runners in certain runner groups.
+* Scale set names must be unique within a runner group.
diff --git a/doc/using_cached_runners.md b/doc/using_cached_runners.md
new file mode 100644
index 00000000..a5573b20
--- /dev/null
+++ b/doc/using_cached_runners.md
@@ -0,0 +1,52 @@
+# Using Cached Runners
+
+## GitHub Action Runners and GARM
+
+When a new instance is created by garm, it usually downloads the latest available GitHub action runner binary, installs the requirements and starts it afterwards. This can be a time consuming task that quickly adds up when a lot of instances are created by garm throughout the day. Therefore it is recommended to include the GitHub action runner binary inside of the used image.
+
+GARM supports cached runners on Linux and Windows images, in a simple manner. GARM verifies if the runner path exists (`C:\actions-runner` or `/home/runner/actions-runner`) on the chosen image, thus knowing if it needs to create the path and download the runner or use the existing runner. In order to simplify setup and validation of the runner, the check is based on the user properly creating, downloading and installing the runner in the predefined path on the target OS.
+
+>**NOTE:** More about these paths will be presented below in the sections for each target OS.
+
+### Cached Runners on Linux Images
+
+On a Linux image, the cached runner is expected by GARM to be set up in a static predefined way. It expects the cached runner to be installed in the `/home/runner/actions-runner` directory. Thus, the user needs to configure their custom image properly in order for GARM to use the cached runner and not download the latest available GitHub action runner binary.
+
+In order to configure a cached GitHub actions runner to work with GARM, the following steps need to be followed:
+
+1. The `actions-runner` directory needs to be created inside the `/home/runner` directory (home path for the garm runner)
+2. Download the wanted version of the runner package
+3. Extract the installer inside the `actions-runner` directory
+
+> **NOTE:** These are based on the steps described on the [actions/runner](https://github.com/actions/runner/releases) repository about installing the GitHub action runner on the Linux x64. The full list of commands looks like this:
+
+```bash
+# Create a folder
+mkdir actions-runner && cd actions-runner
+# Download the latest runner package
+curl -O -L https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz
+# Extract the installer
+tar xzf ./actions-runner-linux-x64-2.320.0.tar.gz
+```
+
+### Cached Runners on Windows Images
+
+On a Windows image, the cached runner is expected by GARM to be set up in a static predefined way. It expects the cached runner to be installed in the `C:\actions-runner\` folder. Thus, the user needs to configure their custom image properly in order for GARM to use the cached runner and not download the latest available GitHub action runner binary.
+
+In order to configure a cached GitHub actions runner to work with GARM, the following steps need to be followed:
+
+1. Create the folder `actions-runner` inside the root folder (`C:\`).
+2. Download the wanted version of the runner package
+3. Extract the installer in the folder created at step 1 (`C:\actions-runner\`)
+
+> **NOTE:** These are based on the steps described on the [actions/runner](https://github.com/actions/runner/releases) repository about installing the GitHub action runner on the Windows x64. The full list of commands looks like this:
+
+```powershell
+# Create a folder under the drive root
+mkdir \actions-runner ; cd \actions-runner
+# Download the latest runner package
+Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-win-x64-2.320.0.zip -OutFile actions-runner-win-x64-2.320.0.zip
+# Extract the installer
+Add-Type -AssemblyName System.IO.Compression.FileSystem ;
+[System.IO.Compression.ZipFile]::ExtractToDirectory("$PWD\actions-runner-win-x64-2.320.0.zip", "$PWD")
+```
\ No newline at end of file
diff --git a/doc/using_garm.md b/doc/using_garm.md
new file mode 100644
index 00000000..e7758410
--- /dev/null
+++ b/doc/using_garm.md
@@ -0,0 +1,816 @@
+# Using GARM
+
+This document will walk you through the various commands and options available in GARM. It is assumed that you have already installed GARM and have it running. If you haven't, please check out the [quickstart](/doc/quickstart.md) document for instructions on how to install GARM.
+
+While using the GARM cli, you will most likely spend most of your time listing pools and runners, but we will cover most of the available commands and options. Some of them we'll skip (like the `init` or `profile` subcommands), as they've been covered in the [quickstart](/doc/quickstart.md) document.
+
+
+- [Using GARM](#using-garm)
+ - [Controller operations](#controller-operations)
+ - [Listing controller info](#listing-controller-info)
+ - [Updating controller settings](#updating-controller-settings)
+ - [Providers](#providers)
+ - [Listing configured providers](#listing-configured-providers)
+ - [Github Endpoints](#github-endpoints)
+ - [Creating a GitHub Endpoint](#creating-a-github-endpoint)
+ - [Listing GitHub Endpoints](#listing-github-endpoints)
+ - [Getting information about an endpoint](#getting-information-about-an-endpoint)
+ - [Deleting a GitHub Endpoint](#deleting-a-github-endpoint)
+ - [GitHub credentials](#github-credentials)
+ - [Adding GitHub credentials](#adding-github-credentials)
+ - [Listing GitHub credentials](#listing-github-credentials)
+ - [Getting detailed information about credentials](#getting-detailed-information-about-credentials)
+ - [Deleting GitHub credentials](#deleting-github-credentials)
+ - [Repositories](#repositories)
+ - [Adding a new repository](#adding-a-new-repository)
+ - [Listing repositories](#listing-repositories)
+ - [Removing a repository](#removing-a-repository)
+ - [Organizations](#organizations)
+ - [Adding a new organization](#adding-a-new-organization)
+ - [Enterprises](#enterprises)
+ - [Adding an enterprise](#adding-an-enterprise)
+ - [Managing webhooks](#managing-webhooks)
+ - [Pools](#pools)
+ - [Creating a runner pool](#creating-a-runner-pool)
+ - [Listing pools](#listing-pools)
+ - [Showing pool info](#showing-pool-info)
+ - [Deleting a pool](#deleting-a-pool)
+ - [Update a pool](#update-a-pool)
+ - [Runners](#runners)
+ - [Listing runners](#listing-runners)
+ - [Showing runner info](#showing-runner-info)
+ - [Deleting a runner](#deleting-a-runner)
+ - [The debug-log command](#the-debug-log-command)
+ - [The debug-events command](#the-debug-events-command)
+ - [Listing recorded jobs](#listing-recorded-jobs)
+
+
+
+## Controller operations
+
+The `controller` is essentially GARM itself. Every deployment of GARM will have its own controller ID which will be used to tag runners in github. The controller is responsible for managing runners, webhooks, repositories, organizations and enterprises. There are a few settings at the controller level which you can tweak, which we will cover below.
+
+### Listing controller info
+
+You can list the controller info by running the following command:
+
+```bash
+garm-cli controller show
++-------------------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++-------------------------+----------------------------------------------------------------------------+
+| Controller ID | a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Hostname | garm |
+| Metadata URL | https://garm.example.com/api/v1/metadata |
+| Callback URL | https://garm.example.com/api/v1/callbacks |
+| Webhook Base URL | https://garm.example.com/webhooks |
+| Controller Webhook URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Minimum Job Age Backoff | 30 |
+| Version | v0.1.6 |
++-------------------------+----------------------------------------------------------------------------+
+```
+
+There are several things of interest in this output.
+
+* `Controller ID` - This is the unique identifier of the controller. Each GARM installation, on first run will automatically generate a unique controller ID. This is important for several reasons. For one, it allows us to run several GARM controllers on the same repos/orgs/enterprises, without accidentally clashing with each other. Each runner started by a GARM controller, will be tagged with this controller ID in order to easily identify runners that we manage.
+* `Hostname` - This is the hostname of the machine where GARM is running. This is purely informative.
+* `Metadata URL` - This URL is configured by the user, and is the URL that is presented to the runners via userdata when they get set up. Runners will connect to this URL and retrieve information they might need to set themselves up. GARM cannot automatically determine this URL, as it is dependent on the user's network setup. GARM may be hidden behind a load balancer or a reverse proxy, in which case, the URL by which the GARM controller can be accessed may be different than the IP addresses that are locally visible to GARM. Runners must be able to connect to this URL.
+* `Callback URL` - This URL is configured by the user, and is the URL that is presented to the runners via userdata when they get set up. Runners will connect to this URL and send status updates and system information (OS version, OS name, github runner agent ID, etc) to the controller. Runners must be able to connect to this URL.
+* `Webhook Base URL` - This is the base URL for webhooks. It is configured by the user in the GARM config file. This URL can be called into by GitHub itself when hooks get triggered by a workflow. GARM needs to know when a new job is started in order to schedule the creation of a new runner. Job webhooks sent to this URL will be recorded by GARM and acted upon. While you can configure this URL directly in your GitHub repo settings, it is advised to use the `Controller Webhook URL` instead, as it is unique to each controller, and allows you to potentially install multiple GARM controllers inside the same repo. Github must be able to connect to this URL.
+* `Controller Webhook URL` - This is the URL that GitHub will call into when a webhook is triggered. This URL is unique to each GARM controller and is the preferred URL to use in order to receive webhooks from GitHub. It serves the same purpose as the `Webhook Base URL`, but is unique to each controller, allowing you to potentially install multiple GARM controllers inside the same repo. Github must be able to connect to this URL.
+* `Minimum Job Age Backoff` - This is the job age in seconds, after which GARM will consider spinning up a new runner to handle it. By default GARM waits for 30 seconds after receiving a new job, before it spins up a runner. This delay is there to allow any existing idle runners (managed by GARM or not) to pick up the job, before reacting to it. This way we avoid being too eager and spin up a runner for a job that would have been picked up by an existing runner anyway. You can set this to 0 if you want GARM to react immediately.
+* `Version` - This is the version of GARM that is running.
+
+We will see the `Controller Webhook URL` later when we set up the GitHub repo to send webhooks to GARM.
+
+### Updating controller settings
+
+As we've mentioned before, there are 3 URLs that are very important for normal operations:
+
+* `metadata_url` - Must be reachable by runners
+* `callback_url` - Must be reachable by runners
+* `webhook_url` - Must be reachable by GitHub
+
+These URLs depend heavily on how GARM was set up and on the user's network topology. GARM may be behind a NAT or reverse proxy. There may be different hostnames/URL paths set up for each of the above, etc. The short of it is that we cannot determine these URLs reliably and we must ask the user to tell GARM what they are.
+
+We can assume that the URL that the user logs in at to manage garm is the same URL that the rest of the URLs are present at, but that is just an assumption. By default, when you initialize GARM for the first time, we make this assumption to make things easy. It's also safe to assume that most users will do this anyway, but in case you don't, you will need to update the URLs in the controller and tell GARM what they are.
+
+In the previous section we saw that most URLs were set to `https://garm.example.com`. The URL path was the same as the routes that GARM sets up. For example, the `metadata_url` has `/api/v1/metadata`. The `callback_url` has `/api/v1/callbacks` and the `webhook_url` has `/webhooks`. This is the default setup and is what most users will use.
+
+If you need to update these URLs, you can use the following command:
+
+```bash
+garm-cli controller update \
+ --metadata-url https://garm.example.com/api/v1/metadata \
+ --callback-url https://garm.example.com/api/v1/callbacks \
+ --webhook-url https://garm.example.com/webhooks
+```
+
+The `Controller Webhook URL` you saw in the previous section is automatically calculated by GARM and is essentially the `webhook_url` with the controller ID appended to it. This URL is unique to each controller and is the preferred URL to use in order to receive webhooks from GitHub.
+
+After updating the URLs, make sure that they are properly routed to the appropriate API endpoint in GARM **and** that they are accessible by the interested parties (runners or github).
+
+## Providers
+
+GARM uses providers to create runners. These providers are external executables that GARM calls into to create runners in a particular IaaS.
+
+### Listing configured providers
+
+Once configured (see [provider configuration](/doc/config.md#providers)), you can list the configured providers by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli provider list
++--------------+---------------------------------+----------+
+| NAME | DESCRIPTION | TYPE |
++--------------+---------------------------------+----------+
+| incus | Incus external provider | external |
++--------------+---------------------------------+----------+
+| lxd | LXD external provider | external |
++--------------+---------------------------------+----------+
+| openstack | OpenStack external provider | external |
++--------------+---------------------------------+----------+
+| azure | Azure provider | external |
++--------------+---------------------------------+----------+
+| k8s_external | k8s external provider | external |
++--------------+---------------------------------+----------+
+| Amazon EC2 | Amazon EC2 provider | external |
++--------------+---------------------------------+----------+
+| equinix | Equinix Metal | external |
++--------------+---------------------------------+----------+
+```
+
+Each of these providers can be used to set up a runner pool for a repository, organization or enterprise.
+
+## Github Endpoints
+
+GARM can be used to manage runners for repos, orgs and enterprises hosted on `github.com` or on a GitHub Enterprise Server.
+
+Endpoints are the way that GARM identifies where the credentials and entities you create are located and where the API endpoints for the GitHub API can be reached, along with a possible CA certificate that validates the connection. There is a default endpoint for `github.com`, so you don't need to add it, unless you're using GHES.
+
+### Creating a GitHub Endpoint
+
+To create a GitHub endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint create \
+ --base-url https://ghes.example.com \
+ --api-base-url https://api.ghes.example.com \
+ --upload-url https://upload.ghes.example.com \
+ --ca-cert-path $HOME/ca-cert.pem \
+ --name example \
+ --description "Just an example ghes endpoint"
++----------------+------------------------------------------------------------------+
+| FIELD | VALUE |
++----------------+------------------------------------------------------------------+
+| Name | example |
+| Base URL | https://ghes.example.com |
+| Upload URL | https://upload.ghes.example.com |
+| API Base URL | https://api.ghes.example.com |
+| CA Cert Bundle | -----BEGIN CERTIFICATE----- |
+| | MIICBzCCAY6gAwIBAgIQX7fEm3dxkTeSc+E1uTFuczAKBggqhkjOPQQDAzA2MRkw |
+| | FwYDVQQKExBHQVJNIGludGVybmFsIENBMRkwFwYDVQQDExBHQVJNIGludGVybmFs |
+| | IENBMB4XDTIzMDIyNTE4MzE0NloXDTMzMDIyMjE4MzE0NlowNjEZMBcGA1UEChMQ |
+| | R0FSTSBpbnRlcm5hbCBDQTEZMBcGA1UEAxMQR0FSTSBpbnRlcm5hbCBDQTB2MBAG |
+| | ByqGSM49AgEGBSuBBAAiA2IABKat241Jzvkl+ksDuPq5jFf9wb5/l54NbGYYfcrs |
+| | 4d9/sNXtPP1y8pM61hs+hCltN9UEwtxqr48q5G7Oc3IjH/dddzJTDC2bLcpwysrC |
+| | NYLGtSfNj+o/8AQMwwclAY7t4KNhMF8wDgYDVR0PAQH/BAQDAgIEMB0GA1UdJQQW |
+| | MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW |
+| | BBSY+cSG07sIU2UC+fOniODKUGqiUTAKBggqhkjOPQQDAwNnADBkAjBcFz3cZ7vO |
+| | IFVzqn9eqXMmZDGp58HGneHhFhJsJtQE4BkxGQmgZJ2OgTGXDqjXG3wCMGMQRALt |
+| | JxwlI1PJJj7M0g48viS4NjT4kq2t/UFIbTy78aarFynUfykpL9FD9NOmiQ== |
+| | -----END CERTIFICATE----- |
+| | |
++----------------+------------------------------------------------------------------+
+```
+
+The name of the endpoint needs to be unique within GARM.
+
+### Listing GitHub Endpoints
+
+To list existing GitHub endpoints, run the following command:
+
+```bash
+garm-cli github endpoint list
++------------+--------------------------+-------------------------------+
+| NAME | BASE URL | DESCRIPTION |
++------------+--------------------------+-------------------------------+
+| github.com | https://github.com | The github.com endpoint |
++------------+--------------------------+-------------------------------+
+| example | https://ghes.example.com | Just an example ghes endpoint |
++------------+--------------------------+-------------------------------+
+```
+
+### Getting information about an endpoint
+
+To get information about a specific endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint show github.com
++--------------+-----------------------------+
+| FIELD | VALUE |
++--------------+-----------------------------+
+| Name | github.com |
+| Base URL | https://github.com |
+| Upload URL | https://uploads.github.com/ |
+| API Base URL | https://api.github.com/ |
++--------------+-----------------------------+
+```
+
+### Deleting a GitHub Endpoint
+
+You can delete an endpoint unless any of the following conditions are met:
+
+* The endpoint is the default endpoint for `github.com`
+* The endpoint is in use by a repository, organization or enterprise
+* There are credentials defined against the endpoint you are trying to remove
+
+To delete an endpoint, you can run the following command:
+
+```bash
+garm-cli github endpoint delete example
+```
+
+## GitHub credentials
+
+GARM needs access to your GitHub repositories, organizations or enterprise in order to manage runners. This is done via a [GitHub personal access token or via a GitHub App](/doc/github_credentials.md). You can configure multiple tokens or apps with access to various repositories, organizations or enterprises, either on GitHub or on GitHub Enterprise Server.
+
+### Adding GitHub credentials
+
+There are two types of credentials:
+
+* PAT - Personal Access Token
+* App - GitHub App
+
+Each type of credential requires slightly different command line arguments. Below is an example of both.
+
+To add a PAT, you can run the following command:
+
+```bash
+garm-cli github credentials add \
+ --name deleteme \
+ --description "just a test" \
+ --auth-type pat \
+ --pat-oauth-token gh_yourTokenGoesHere \
+ --endpoint github.com
+```
+
+To add a GitHub App (only available for repos and orgs), you can run the following command:
+
+```bash
+garm-cli github credentials add \
+ --name deleteme-app \
+ --description "just a test" \
+ --endpoint github.com \
+ --auth-type app \
+ --app-id 1 \
+ --app-installation-id 99 \
+    --private-key-path /etc/garm/yourGarmAppKey.2024-12-12.private-key.pem
+```
+
+Notice that in both cases we specified the github endpoint for which these credentials are valid.
+
+### Listing GitHub credentials
+
+To list existing credentials, run the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli github credentials ls
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| ID | NAME | DESCRIPTION | BASE URL | API URL | UPLOAD URL | TYPE |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 1 | gabriel | github token or user gabriel | https://github.com | https://api.github.com/ | https://uploads.github.com/ | pat |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+| 2 | gabriel_org | github token with org level access | https://github.com | https://api.github.com/ | https://uploads.github.com/ | app |
++----+-------------+------------------------------------+--------------------+-------------------------+-----------------------------+------+
+```
+
+For more information about credentials, see the [github credentials](/doc/github_credentials.md) section for more details.
+
+### Getting detailed information about credentials
+
+To get detailed information about one specific credential, you can run the following command:
+
+```bash
+garm-cli github credentials show 2
++---------------+------------------------------------+
+| FIELD | VALUE |
++---------------+------------------------------------+
+| ID | 2 |
+| Name | gabriel_org |
+| Description | github token with org level access |
+| Base URL | https://github.com |
+| API URL | https://api.github.com/ |
+| Upload URL | https://uploads.github.com/ |
+| Type | app |
+| Endpoint | github.com |
+| | |
+| Repositories | gsamfira/garm-testing |
+| | |
+| Organizations | gsamfira |
++---------------+------------------------------------+
+```
+
+### Deleting GitHub credentials
+
+To delete a credential, you can run the following command:
+
+```bash
+garm-cli github credentials delete 2
+```
+
+> **NOTE**: You may not delete credentials that are currently associated with a repository, organization or enterprise. You will need to first replace the credentials on the entity, and then you can delete the credentials.
+
+## Repositories
+
+### Adding a new repository
+
+To add a new repository we need to use credentials that have access to the repository. We've listed credentials above, so let's add our first repository:
+
+```bash
+ubuntu@garm:~$ garm-cli repository add \
+ --name garm \
+ --owner gabriel-samfira \
+ --credentials gabriel \
+ --install-webhook \
+ --pool-balancer-type roundrobin \
+ --random-webhook-secret
++----------------------+--------------------------------------+
+| FIELD | VALUE |
++----------------------+--------------------------------------+
+| ID | 0c91d9fd-2417-45d4-883c-05daeeaa8272 |
+| Owner | gabriel-samfira |
+| Name | garm |
+| Pool balancer type | roundrobin |
+| Credentials | gabriel |
+| Pool manager running | true |
++----------------------+--------------------------------------+
+```
+
+Let's break down the command a bit and explain what happened above. We added a new repository to GARM, that belongs to the user `gabriel-samfira` and is called `garm`. When using GitHub, this translates to `https://github.com/gabriel-samfira/garm`.
+
+As part of the above command, we used the credentials called `gabriel` to authenticate to GitHub. If those credentials didn't have access to the repository, we would have received an error when adding the repo.
+
+The other interesting bit about the above command is that we automatically added the `webhook` to the repository and generated a secure random secret to authenticate the webhooks that come in from GitHub for this new repo. Any webhook claiming to be for the `gabriel-samfira/garm` repo, will be validated against the secret that was generated.
+
+Another important aspect to remember is that once the entity (in this case a repository) is created, the credentials associated with the repo at creation time dictate the GitHub endpoint in which this repository exists.
+
+When updating credentials for this entity, the new credentials **must** be associated with the same endpoint as the old ones. An error is returned if the repo is associated with `github.com` but the new credentials you're trying to set are associated with a GHES endpoint.
+
+### Listing repositories
+
+To list existing repositories, run the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli repository list
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+| ID | OWNER | NAME | CREDENTIALS NAME | POOL BALANCER TYPE | POOL MGR RUNNING |
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+| be3a0673-56af-4395-9ebf-4521fea67567 | gabriel-samfira | garm | gabriel | roundrobin | true |
++--------------------------------------+-----------------+--------------+------------------+--------------------+------------------+
+```
+
+This will list all the repositories that GARM is currently managing.
+
+### Removing a repository
+
+To remove a repository, you can use the following command:
+
+```bash
+garm-cli repository delete be3a0673-56af-4395-9ebf-4521fea67567
+```
+
+This will remove the repository from GARM, and if a webhook was installed, will also clean up the webhook from the repository.
+
+> **NOTE**: GARM will not remove a webhook that points to the `Base Webhook URL`. It will only remove webhooks that are namespaced to the running controller.
+
+## Organizations
+
+### Adding a new organization
+
+Adding a new organization is similar to adding a new repository. You need to use credentials that have access to the organization, and you can add the organization to GARM using the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli organization add \
+ --credentials gabriel_org \
+ --name gsamfira \
+ --install-webhook \
+ --random-webhook-secret
++----------------------+--------------------------------------+
+| FIELD | VALUE |
++----------------------+--------------------------------------+
+| ID | b50f648d-708f-48ed-8a14-cf58887af9cf |
+| Name | gsamfira |
+| Credentials | gabriel_org |
+| Pool manager running | true |
++----------------------+--------------------------------------+
+```
+
+This will add the organization `gsamfira` to GARM, and install a webhook for it. The webhook will be validated against the secret that was generated. The only difference between adding an organization and adding a repository is that you use the `organization` subcommand instead of the `repository` subcommand, and the `--name` option represents the `name` of the organization.
+
+Managing webhooks for organizations is similar to managing webhooks for repositories. You can *list*, *show*, *install* and *uninstall* webhooks for organizations using the `garm-cli organization webhook` subcommand. We won't go into details here, as it's similar to managing webhooks for repositories.
+
+All the other operations that exist on repositories, like listing, removing, etc, also exist for organizations and enterprises. Check out the help for the `garm-cli organization` subcommand for more details.
+
+## Enterprises
+
+### Adding an enterprise
+
+Enterprises are a bit special. Currently we don't support managing webhooks for enterprises, mainly because the level of access that would be required to do so seems a bit too much to enable in GARM itself. And considering that you'll probably ever only have one enterprise with multiple organizations and repositories, the effort/risk to benefit ratio makes this feature not worth implementing at the moment.
+
+To add an enterprise to GARM, you can use the following command:
+
+```bash
+garm-cli enterprise add \
+ --credentials gabriel_enterprise \
+ --name samfira \
+ --webhook-secret SuperSecretWebhookTokenPleaseReplaceMe
+```
+
+The `name` of the enterprise is the ["slug" of the enterprise](https://docs.github.com/en/enterprise-cloud@latest/admin/managing-your-enterprise-account/creating-an-enterprise-account).
+
+You will then have to manually add the `Controller Webhook URL` to the enterprise in the GitHub UI.
+
+All the other operations that exist on repositories, like listing, removing, etc, also exist for organizations and enterprises. Have a look at the help for the `garm-cli enterprise` subcommand for more details.
+
+At that point the enterprise will be added to GARM and you can start managing runners for it.
+
+## Managing webhooks
+
+Webhook management is available for repositories and organizations. I'm going to show you how to manage webhooks for a repository, but the same commands apply for organizations. See `--help` for more details.
+
+When we added the repository in the previous section, we specified the `--install-webhook` and the `--random-webhook-secret` options. These two options automatically added a webhook to the repository and generated a random secret for it. The `webhook` URL that was used, will correspond to the `Controller Webhook URL` that we saw earlier when we listed the controller info. Let's list it and see what it looks like:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook show be3a0673-56af-4395-9ebf-4521fea67567
++--------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++--------------+----------------------------------------------------------------------------+
+| ID | 460257636 |
+| URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Events | [workflow_job] |
+| Active | true |
+| Insecure SSL | false |
++--------------+----------------------------------------------------------------------------+
+```
+
+We can see that it's active, and the events to which it subscribed.
+
+The `--install-webhook` and `--random-webhook-secret` options are convenience options that allow you to quickly add a new repository to GARM and have it ready to receive webhooks from GitHub. As long as you configured the URLs correctly (see previous sections for details), you should see a green checkmark in the GitHub settings page, under `Webhooks`.
+
+If you don't want to install the webhook, you can add the repository without it, and then install it later using the `garm-cli repository webhook install` command (which we'll show in a second) or manually add it in the GitHub UI.
+
+To uninstall a webhook from a repository, you can use the following command:
+
+```bash
+garm-cli repository webhook uninstall be3a0673-56af-4395-9ebf-4521fea67567
+```
+
+After which listing the webhook will show that it's inactive:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook show be3a0673-56af-4395-9ebf-4521fea67567
+Error: [GET /repositories/{repoID}/webhook][404] GetRepoWebhookInfo default {Error:Not Found Details:hook not found}
+```
+
+You can always add it back using:
+
+```bash
+ubuntu@garm:~$ garm-cli repository webhook install be3a0673-56af-4395-9ebf-4521fea67567
++--------------+----------------------------------------------------------------------------+
+| FIELD | VALUE |
++--------------+----------------------------------------------------------------------------+
+| ID | 460258767 |
+| URL | https://garm.example.com/webhooks/a4dd5f41-8e1e-42a7-af53-c0ba5ff6b0b3 |
+| Events | [workflow_job] |
+| Active | true |
+| Insecure SSL | false |
++--------------+----------------------------------------------------------------------------+
+```
+
+To allow GARM to manage webhooks, the PAT or app you're using must have the `admin:repo_hook` and `admin:org_hook` scopes (or equivalent). Webhook management is not available for enterprises. For enterprises you will have to add the webhook manually.
+
+To manually add a webhook, see the [webhooks](/doc/webhooks.md) section.
+
+## Pools
+
+### Creating a runner pool
+
+Now that we have a repository, organization or enterprise added to GARM, we can create a runner pool for it. A runner pool is a collection of runners of the same type, that are managed by GARM and are used to run workflows for the repository, organization or enterprise.
+
+You can create multiple pools of runners for the same entity (repository, organization or enterprise), and you can create multiple pools of runners, each pool defining different runner types. For example, you can have a pool of runners that are created on AWS, and another pool of runners that are created on Azure, k8s, LXD, etc. For repositories or organizations with complex needs, you can set up a number of pools that cover a wide range of needs, based on cost, capability (GPUs, FPGAs, etc) or sheer raw computing power. You don't have to pick just one, especially since managing all of them is done using the exact same commands, as we'll show below.
+
+Before we create a pool, we have to decide which provider we want to use. We've listed the providers above, so let's pick one and create a pool of runners for our repository. For the purpose of this example, we'll use the `incus` provider. We'll show you how to create a pool using this provider, but keep in mind that adding another pool using a different provider is done using the exact same commands. The only difference will be in the `--image`, `--flavor` and `--extra-specs` options that you'll use when creating the pool.
+
+Out of those three options, only the `--image` and `--flavor` are mandatory. The `--extra-specs` flag is optional and is used to pass additional information to the provider when creating the pool. The `--extra-specs` option is provider specific, and you'll have to consult the provider documentation to see what options are available.
+
+But I digress. Let's create a pool of runners using the `incus` provider, for the `gabriel-samfira/garm` repository we created above:
+
+```bash
+garm-cli pool add \
+ --enabled=false \
+ --repo be3a0673-56af-4395-9ebf-4521fea67567 \
+ --image "images:ubuntu/22.04/cloud" \
+ --flavor default \
+ --provider-name incus \
+ --min-idle-runners 1 \
+ --tags ubuntu,incus
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+Let's unpack the command and explain what happened above. We added a new pool of runners to GARM, that belongs to the `gabriel-samfira/garm` repository. We used the `incus` provider to create the pool, and we specified the `--image` and `--flavor` options to tell the provider what kind of runners we want to create. On Incus and LXD, the flavor maps to a `profile`. The profile can specify the resources allocated to a container or VM (RAM, CPUs, disk space, etc). The image maps to an incus or LXD image, as you would normally use when spinning up a new container or VM using the `incus launch` command.
+
+We also specified the `--min-idle-runners` option to tell GARM to always keep at least 1 runner idle in the pool. This is useful for repositories that have a lot of workflows that run often, and we want to make sure that we always have a runner ready to pick up a job.
+
+If we review the output of the command, we can see that the pool was created with a maximum number of 5 runners. This is just a default we can tweak when creating the pool, or later using the `garm-cli pool update` command. We can also see that the pool was created with a runner bootstrap timeout of 20 minutes. This timeout is important on providers where the instance may take a long time to spin up. For example, on Equinix Metal, some operating systems can take a few minutes to install and reboot. This timeout can be tweaked to a higher value to account for this.
+
+The pool was created with the `--enabled` flag set to `false`, so the pool won't create any runners yet:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list 9daa34aa-a08a-4f29-a782-f54950d8521a
++----+------+--------+---------------+---------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+------+--------+---------------+---------+
++----+------+--------+---------------+---------+
+```
+
+### Listing pools
+
+To list pools created for a repository you can run:
+
+```bash
+ubuntu@garm:~$ garm-cli pool list --repo=be3a0673-56af-4395-9ebf-4521fea67567
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX |
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+| 9daa34aa-a08a-4f29-a782-f54950d8521a | images:ubuntu/22.04/cloud | default | ubuntu incus | | | false | garm |
++--------------------------------------+---------------------------+---------+--------------+------------+-------+---------+---------------+
+```
+
+If you want to list pools for an organization or enterprise, you can use the `--org` or `--enterprise` options respectively.
+
+In the absence of the `--repo`, `--org` or `--enterprise` options, the command will list all pools in GARM, regardless of the entity they belong to.
+
+```bash
+ubuntu@garm:~/garm$ garm-cli pool list
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| ID | IMAGE | FLAVOR | TAGS | BELONGS TO | LEVEL | ENABLED | RUNNER PREFIX | PRIORITY |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| 8935f6a6-f20f-4220-8fa9-9075e7bd7741 | windows_2022 | c3.small.x86 | self-hosted x64 Windows windows equinix | gsamfira/scripts | repo | false | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+| 9233b3f5-2ccf-4689-8f86-a8a0d656dbeb | runner-upstream:latest | small | self-hosted x64 Linux k8s org | gsamfira | org | false | garm | 0 |
++--------------------------------------+---------------------------+--------------+-----------------------------------------+------------------+-------+---------+---------------+----------+
+```
+
+### Showing pool info
+
+You can get detailed information about a pool by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli pool show 9daa34aa-a08a-4f29-a782-f54950d8521a
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+### Deleting a pool
+
+In order to delete a pool, you must first make sure there are no runners in the pool. To ensure this, we can first disable the pool, to make sure no new runners are created, then remove the runners or allow them to be used, after which we can delete the pool.
+
+To disable a pool, you can use the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli pool update 9daa34aa-a08a-4f29-a782-f54950d8521a --enabled=false
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | false |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+If there are no runners in the pool, you can then remove it:
+
+```bash
+ubuntu@garm:~$ garm-cli pool delete 9daa34aa-a08a-4f29-a782-f54950d8521a
+```
+
+### Update a pool
+
+You can update a pool by using the `garm-cli pool update` command. Nearly every aspect of a pool can be updated after it has been created. To demonstrate the command, we can enable the pool we created earlier:
+
+```bash
+ubuntu@garm:~$ garm-cli pool update 9daa34aa-a08a-4f29-a782-f54950d8521a --enabled=true
++--------------------------+----------------------------------------+
+| FIELD | VALUE |
++--------------------------+----------------------------------------+
+| ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Provider Name | incus |
+| Image | images:ubuntu/22.04/cloud |
+| Flavor | default |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| Max Runners | 5 |
+| Min Idle Runners | 1 |
+| Runner Bootstrap Timeout | 20 |
+| Tags | ubuntu, incus |
+| Belongs to | gabriel-samfira/garm |
+| Level | repo |
+| Enabled | true |
+| Runner Prefix | garm |
+| Extra specs | |
+| GitHub Runner Group | |
++--------------------------+----------------------------------------+
+```
+
+See `garm-cli pool update --help` for a list of settings that can be changed.
+
+Now that the pool is enabled, GARM will start creating runners for it. We can list the runners in the pool to see if any have been created:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list 9daa34aa-a08a-4f29-a782-f54950d8521a
++----+-------------------+---------+---------------+--------------------------------------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+-------------------+---------+---------------+--------------------------------------+
+| 1 | garm-BFrp51VoVBCO | running | installing | 9daa34aa-a08a-4f29-a782-f54950d8521a |
++----+-------------------+---------+---------------+--------------------------------------+
+```
+
+We can see that a runner has been created and is currently being installed. If we check incus, we should also see it there as well:
+
+```bash
+root@incus:~# incus list
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+| garm-BFrp51VoVBCO | RUNNING | 10.23.120.217 (eth0) | fd42:e6ea:8b6c:6cb9:216:3eff:feaa:fabf (eth0) | CONTAINER | 0 |
++-------------------+---------+----------------------+-----------------------------------------------+-----------+-----------+
+```
+
+Awesome! This runner will be able to pick up jobs that match the labels we've set on the pool.
+
+## Runners
+
+### Listing runners
+
+You can list runners for a pool, for a repository, organization or enterprise, or for all of them. To list all runners, you can run:
+
+```bash
+ubuntu@garm:~$ garm-cli runner list
++----+---------------------+---------+---------------+--------------------------------------+
+| NR | NAME | STATUS | RUNNER STATUS | POOL ID |
++----+---------------------+---------+---------------+--------------------------------------+
+| 1 | garm-jZWtnxYHR6sG | running | idle | 8ec34c1f-b053-4a5d-80d6-40afdfb389f9 |
++----+---------------------+---------+---------------+--------------------------------------+
+| 2 | garm-2vtBBaT2dgIvFg | running | idle | c03c8101-3ae0-49d7-98b7-298a3689d24c |
++----+---------------------+---------+---------------+--------------------------------------+
+| 3 | garm-Ew7SzN6LVlEC | running | idle | 577627f4-1add-4a45-9c62-3a7cbdec8403 |
++----+---------------------+---------+---------------+--------------------------------------+
+| 4 | garm-BFrp51VoVBCO | running | idle | 9daa34aa-a08a-4f29-a782-f54950d8521a |
++----+---------------------+---------+---------------+--------------------------------------+
+```
+
+Have a look at the help command for the flags available to the `list` subcommand.
+
+### Showing runner info
+
+You can get detailed information about a runner by running the following command:
+
+```bash
+ubuntu@garm:~$ garm-cli runner show garm-BFrp51VoVBCO
++-----------------+------------------------------------------------------------------------------------------------------+
+| FIELD | VALUE |
++-----------------+------------------------------------------------------------------------------------------------------+
+| ID | b332a811-0ebf-474c-9997-780124e22382 |
+| Provider ID | garm-BFrp51VoVBCO |
+| Name | garm-BFrp51VoVBCO |
+| OS Type | linux |
+| OS Architecture | amd64 |
+| OS Name | Ubuntu |
+| OS Version | 22.04 |
+| Status | running |
+| Runner Status | idle |
+| Pool ID | 9daa34aa-a08a-4f29-a782-f54950d8521a |
+| Addresses | 10.23.120.217 |
+| | fd42:e6ea:8b6c:6cb9:216:3eff:feaa:fabf |
+| Status Updates | 2024-02-11T23:39:54: downloading tools from https://github.com/actions/runner/releases/download/v2.3 |
+| | 12.0/actions-runner-linux-x64-2.312.0.tar.gz |
+| | 2024-02-11T23:40:04: extracting runner |
+| | 2024-02-11T23:40:07: installing dependencies |
+| | 2024-02-11T23:40:13: configuring runner |
+| | 2024-02-11T23:40:13: runner registration token was retrieved |
+| | 2024-02-11T23:40:19: runner successfully configured after 1 attempt(s) |
+| | 2024-02-11T23:40:20: installing runner service |
+| | 2024-02-11T23:40:20: starting service |
+| | 2024-02-11T23:40:21: runner successfully installed |
++-----------------+------------------------------------------------------------------------------------------------------+
+```
+
+### Deleting a runner
+
+You can delete a runner by running the following command:
+
+```bash
+garm-cli runner rm garm-BFrp51VoVBCO
+```
+
+Only idle runners can be removed. If a runner is executing a job, it cannot be removed. However, a runner that is currently running a job, will be removed anyway when that job finishes. You can wait for the job to finish or you can cancel the job from the github workflow page.
+
+In some cases, providers may error out when creating or deleting a runner. This can happen if the provider is misconfigured. To avoid situations in which GARM gets deadlocked trying to remove a runner from a provider that is in error, we can forcefully remove a runner. The `--force` flag will make GARM ignore any error returned by the provider when attempting to delete an instance:
+
+```bash
+garm-cli runner remove --force garm-BFrp51VoVBCO
+```
+
+Awesome! We've covered all the major parts of using GARM. This is all you need to have your workflows run on your self-hosted runners. Of course, each provider may have its own particularities, config options, extra specs and caveats (all of which should be documented in the provider README), but once added to the GARM config, creating a pool should be the same.
+
+## The debug-log command
+
+GARM outputs logs to standard out, log files and optionally to a websocket for easy debugging. This is just a convenience feature that allows you to stream logs to your terminal without having to log into the server. It's disabled by default, but if you enable it, you'll be able to run:
+
+```bash
+ubuntu@garm:~$ garm-cli debug-log
+time=2024-02-12T08:36:18.584Z level=INFO msg=access_log method=GET uri=/api/v1/ws user_agent=Go-http-client/1.1 ip=127.0.0.1:47260 code=200 bytes=0 request_time=447.445µs
+time=2024-02-12T08:36:31.251Z level=INFO msg=access_log method=GET uri=/api/v1/instances user_agent=Go-http-client/1.1 ip=127.0.0.1:58460 code=200 bytes=1410 request_time=656.184µs
+```
+
+This will bring a real-time log to your terminal. While this feature should be fairly secure, I encourage you to only expose it within networks you know are secure. This can be done by configuring a reverse proxy in front of GARM that only allows connections to the websocket endpoint from certain locations.
+
+## The debug-events command
+
+Starting with GARM v0.1.5 a new command has been added to the CLI that consumes database events recorded by GARM. Whenever something is updated in the database, a new event is generated. These events are generated by the database watcher and are also exported via a websocket endpoint. This websocket endpoint is meant to be consumed by applications that wish to integrate GARM and want to avoid having to poll the API.
+
+This command is not meant to be used to integrate GARM events, it is merely a debug tool that allows you to see what events are being generated by GARM. To use it, you can run:
+
+```bash
+garm-cli debug-events --filters='{"send-everything": true}'
+```
+
+This command will send all events to your CLI as they happen. You can also filter by entity or operation like so:
+
+```bash
+garm-cli debug-events --filters='{"filters": [{"entity-type": "instance", "operations": ["create", "delete"]}, {"entity-type": "pool"}, {"entity-type": "controller"}]}'
+```
+
+The payloads that get sent to your terminal are described in the [events](/doc/events.md) section, but the short description is that you get the operation type (create, update, delete), the entity type (instance, pool, repo, etc) and the json payload as you normally would when you fetch them through the API. Sensitive info like tokens or passwords are never returned.
+
+## Listing recorded jobs
+
+GARM will record any job that comes in and for which we have a pool configured. If we don't have a pool for a particular job, then that job is ignored. There is no point in recording jobs that we can't do anything about. It would just bloat the database for no reason.
+
+To view existing jobs, run the following command:
+
+```bash
+garm-cli job list
+```
+
+If you've just set up GARM and have not yet created a pool or triggered a job, this will be empty. If you've configured everything and still don't receive jobs, you'll need to make sure that your URLs (discussed at the beginning of this article), are correct. GitHub needs to be able to reach the webhook URL that our GARM instance listens on.
\ No newline at end of file
diff --git a/doc/webhooks.md b/doc/webhooks.md
new file mode 100644
index 00000000..ab29937b
--- /dev/null
+++ b/doc/webhooks.md
@@ -0,0 +1,56 @@
+# Webhooks
+
+Garm is designed to auto-scale github runners. To achieve this, ```garm``` relies on [GitHub Webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). Webhooks allow ```garm``` to react to workflow events from your repository, organization or enterprise.
+
+In your repository or organization, navigate to ```Settings --> Webhooks```:
+
+
+
+And click on ```Add webhook```.
+
+In the ```Payload URL``` field, enter the URL to the ```garm``` webhook endpoint. The ```garm``` API endpoint for webhooks is:
+
+ ```txt
+ POST /webhooks
+ ```
+
+If ```garm``` is running on a server under the domain ```garm.example.com```, then that field should be set to ```https://garm.example.com/webhooks```.
+
+In the webhook configuration page under ```Content type``` you will need to select ```application/json```, set the proper webhook URL and, really important, **make sure you configure a webhook secret**. Garm will authenticate the payloads to make sure they are coming from GitHub.
+
+The webhook secret must be secure. Use something like this to generate one:
+
+ ```bash
+ gabriel@rossak:~$ function generate_secret () {
+ tr -dc 'a-zA-Z0-9!@#$%^&*()_+?><~\`;' < /dev/urandom | head -c 64;
+ echo ''
+ }
+
+ gabriel@rossak:~$ generate_secret
+ 9Q*nsr*S54g0imK64(!2$Ns6C!~VsH(p)cFj+AMLug%LM!R%FOQ
+ ```
+
+Make a note of that secret, as you'll need it later when you define the repo/org/enterprise in ```GARM```.
+
+
+
+While you can use `http` for your webhook, I highly recommend you set up a proper x509 certificate for your GARM server and use `https` instead. If you choose `https`, GitHub will present you with an additional option to configure the SSL certificate verification.
+
+
+
+If you're testing and want to use a self signed certificate, you can disable SSL verification or just use `http`, but for production you should use `https` with a proper certificate and SSL verification set to `enabled`.
+
+It's fairly trivial to set up a proper x509 certificate for your GARM server. You can use [Let's Encrypt](https://letsencrypt.org/) to get a free certificate.
+
+
+Next, you can choose which events GitHub should send to ```garm``` via webhooks. Click on ```Let me select individual events```.
+
+
+
+Now select ```Workflow jobs``` (should be at the bottom). You can send everything if you want, but any events ```garm``` doesn't care about will simply be ignored.
+
+
+
+Finally, click on ```Add webhook``` and you're done.
+
+GitHub will send a test webhook to your endpoint. If all is well, you should see a green checkmark next to your webhook.
\ No newline at end of file
diff --git a/doc/webhooks_and_callbacks.md b/doc/webhooks_and_callbacks.md
deleted file mode 100644
index dfe1b3a1..00000000
--- a/doc/webhooks_and_callbacks.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Webhooks
-
-Garm is designed to auto-scale github runners based on a few simple rules:
-
-* A minimum idle runner count can be set for a pool. Garm will attempt to maintain that minimum of idle runners, ready to be used by your workflows.
-* A maximum number of runners for a pool. This is a hard limit of runners a pool will create, regardless of minimum idle runners.
-* When a runner is scheduled by github, ```garm``` will automatically spin up a new runner to replace it, obeying the maximum hard limit defined.
-
-To achieve this, ```garm``` relies on [GitHub Webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/about-webhooks). Webhooks allow ```garm``` to react to workflow events from your repository or organization.
-
-In your repository or organization, navigate to ```Settings --> Webhooks```. In the ```Payload URL``` field, enter the URL to the ```garm``` webhook endpoint. The ```garm``` API endpoint for webhooks is:
-
- ```txt
- POST /webhooks
- ```
-
-If ```garm``` is running on a server under the domain ```garm.example.com```, then that field should be set to ```https://garm.example.com/webhooks```.
-
-In the webhook configuration page under ```Content type``` you will need to select ```application/json```, set the proper webhook URL and, really important, **make sure you configure a webhook secret**. Garm will authenticate the payloads to make sure they are coming from GitHub.
-
-The webhook secret must be secure. Use something like this to generate one:
-
- ```bash
- gabriel@rossak:~$ function generate_secret () {
- tr -dc 'a-zA-Z0-9!@#$%^&*()_+?><~\`;' < /dev/urandom | head -c 64;
- echo ''
- }
-
- gabriel@rossak:~$ generate_secret
- 9Q*nsr*S54g0imK64(!2$Ns6C!~VsH(p)cFj+AMLug%LM!R%FOQ
- ```
-
-Next, you can choose which events GitHub should send to ```garm``` via webhooks. Click on ```Let me select individual events``` and select ```Workflow jobs``` (should be at the bottom). You can send everything if you want, but any events ```garm``` doesn't care about will simply be ignored.
-
-## The callback_url option
-
-Your runners will call back home with status updates as they install. Once they are set up, they will also send the GitHub agent ID they were allocated. You will need to configure the ```callback_url``` option in the ```garm``` server config. This URL needs to point to the following API endpoint:
-
- ```txt
- POST /api/v1/callbacks/status
- ```
-
-Example of a runner sending status updates:
-
- ```bash
- garm-cli runner show garm-f5227755-129d-4e2d-b306-377a8f3a5dfe
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- | FIELD | VALUE |
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- | ID | 1afb407b-e9f7-4d75-a410-fc4a8c2dbe6c |
- | Provider ID | garm-f5227755-129d-4e2d-b306-377a8f3a5dfe |
- | Name | garm-f5227755-129d-4e2d-b306-377a8f3a5dfe |
- | OS Type | linux |
- | OS Architecture | amd64 |
- | OS Name | ubuntu |
- | OS Version | focal |
- | Status | running |
- | Runner Status | idle |
- | Pool ID | 98f438b9-5549-4eaf-9bb7-1781533a455d |
- | Status Updates | 2022-05-05T11:32:41: downloading tools from https://github.com/actions/runner/releases/download/v2.290.1/actions-runner-linux-x64-2.290.1.tar.gz |
- | | 2022-05-05T11:32:43: extracting runner |
- | | 2022-05-05T11:32:47: installing dependencies |
- | | 2022-05-05T11:32:55: configuring runner |
- | | 2022-05-05T11:32:59: installing runner service |
- | | 2022-05-05T11:33:00: starting service |
- | | 2022-05-05T11:33:00: runner successfully installed |
- +-----------------+--------------------------------------------------------------------------------------------------------------------------------------------------+
- ```
-
-This URL must be set and must be accessible by the instance. If you wish to restrict access to it, a reverse proxy can be configured to accept requests only from networks in which the runners ```garm``` manages will be spun up. This URL doesn't need to be globally accessible, it just needs to be accessible by the instances.
-
-For example, in a scenario where you expose the API endpoint directly, this setting could look like the following:
-
- ```toml
- callback_url = "https://garm.example.com/api/v1/callbacks/status"
- ```
-
-Authentication is done using a short-lived JWT token, that gets generated for a particular instance that we are spinning up. That JWT token grants access to the instance to only update it's own status and to fetch metadata for itself. No other API endpoints will work with that JWT token. The validity of the token is equal to the pool bootstrap timeout value (default 20 minutes) plus the garm polling interval (5 minutes).
-
-There is a sample ```nginx``` config [in the testdata folder](/testdata/nginx-server.conf). Feel free to customize it whichever way you see fit.
-
-## The metadata_url option
-
-The metadata URL is the base URL for any information an instance may need to fetch in order to finish setting itself up. As this URL may be placed behind a reverse proxy, you'll need to configure it in the ```garm``` config file. Ultimately this URL will need to point to the following ```garm``` API endpoint:
-
- ```bash
- GET /api/v1/metadata
- ```
-
-This URL needs to be accessible only by the instances ```garm``` sets up. This URL will not be used by anyone else. To configure it in ```garm``` add the following line in the ```[default]``` section of your ```garm``` config:
-
- ```toml
- metadata_url = "https://garm.example.com/api/v1/metadata"
- ```
diff --git a/errors/errors.go b/errors/errors.go
deleted file mode 100644
index 11ebce92..00000000
--- a/errors/errors.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package errors
-
-import "fmt"
-
-var (
- // ErrUnauthorized is returned when a user does not have
- // authorization to perform a request
- ErrUnauthorized = NewUnauthorizedError("Unauthorized")
- // ErrNotFound is returned if an object is not found in
- // the database.
- ErrNotFound = NewNotFoundError("not found")
- // ErrDuplicateUser is returned when creating a user, if the
- // user already exists.
- ErrDuplicateEntity = NewDuplicateUserError("duplicate")
- // ErrBadRequest is returned is a malformed request is sent
- ErrBadRequest = NewBadRequestError("invalid request")
- // ErrTimeout is returned when a timeout occurs.
- ErrTimeout = fmt.Errorf("timed out")
- ErrUnprocessable = fmt.Errorf("cannot process request")
-)
-
-type baseError struct {
- msg string
-}
-
-func (b *baseError) Error() string {
- return b.msg
-}
-
-// NewProviderError returns a new ProviderError
-func NewProviderError(msg string, a ...interface{}) error {
- return &ProviderError{
- baseError{
- msg: fmt.Sprintf(msg, a...),
- },
- }
-}
-
-// UnauthorizedError is returned when a request is unauthorized
-type ProviderError struct {
- baseError
-}
-
-// NewMissingSecretError returns a new MissingSecretError
-func NewMissingSecretError(msg string, a ...interface{}) error {
- return &MissingSecretError{
- baseError{
- msg: fmt.Sprintf(msg, a...),
- },
- }
-}
-
-// MissingSecretError is returned the secret to validate a webhook is missing
-type MissingSecretError struct {
- baseError
-}
-
-// NewUnauthorizedError returns a new UnauthorizedError
-func NewUnauthorizedError(msg string) error {
- return &UnauthorizedError{
- baseError{
- msg: msg,
- },
- }
-}
-
-// UnauthorizedError is returned when a request is unauthorized
-type UnauthorizedError struct {
- baseError
-}
-
-// NewNotFoundError returns a new NotFoundError
-func NewNotFoundError(msg string, a ...interface{}) error {
- return &NotFoundError{
- baseError{
- msg: fmt.Sprintf(msg, a...),
- },
- }
-}
-
-// NotFoundError is returned when a resource is not found
-type NotFoundError struct {
- baseError
-}
-
-// NewDuplicateUserError returns a new DuplicateUserError
-func NewDuplicateUserError(msg string) error {
- return &DuplicateUserError{
- baseError{
- msg: msg,
- },
- }
-}
-
-// DuplicateUserError is returned when a duplicate user is requested
-type DuplicateUserError struct {
- baseError
-}
-
-// NewBadRequestError returns a new BadRequestError
-func NewBadRequestError(msg string, a ...interface{}) error {
- return &BadRequestError{
- baseError{
- msg: fmt.Sprintf(msg, a...),
- },
- }
-}
-
-// BadRequestError is returned when a malformed request is received
-type BadRequestError struct {
- baseError
-}
-
-// NewConflictError returns a new ConflictError
-func NewConflictError(msg string, a ...interface{}) error {
- return &ConflictError{
- baseError{
- msg: fmt.Sprintf(msg, a...),
- },
- }
-}
-
-// ConflictError is returned when a conflicting request is made
-type ConflictError struct {
- baseError
-}
diff --git a/go.mod b/go.mod
index 055b69bc..1ef71c9d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,87 +1,88 @@
module github.com/cloudbase/garm
-go 1.20
+go 1.24.6
require (
- github.com/BurntSushi/toml v1.2.1
- github.com/go-resty/resty/v2 v2.7.0
- github.com/golang-jwt/jwt v3.2.2+incompatible
- github.com/google/go-github/v48 v48.2.0
- github.com/google/uuid v1.3.0
- github.com/gorilla/handlers v1.5.1
- github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.5.0
- github.com/jedib0t/go-pretty/v6 v6.4.6
- github.com/juju/clock v1.0.3
- github.com/juju/retry v1.0.0
- github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce
+ github.com/BurntSushi/toml v1.5.0
+ github.com/bradleyfalzon/ghinstallation/v2 v2.16.0
+ github.com/cloudbase/garm-provider-common v0.1.7
+ github.com/felixge/httpsnoop v1.0.4
+ github.com/go-openapi/errors v0.22.2
+ github.com/go-openapi/runtime v0.28.0
+ github.com/go-openapi/strfmt v0.23.0
+ github.com/go-openapi/swag v0.23.1
+ github.com/golang-jwt/jwt/v5 v5.3.0
+ github.com/google/go-github/v72 v72.0.0
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/handlers v1.5.2
+ github.com/gorilla/mux v1.8.1
+ github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413
+ github.com/jedib0t/go-pretty/v6 v6.6.8
github.com/manifoldco/promptui v0.9.0
- github.com/mattn/go-isatty v0.0.18
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354
- github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.14.0
- github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b
- github.com/spf13/cobra v1.6.1
- github.com/stretchr/testify v1.8.2
- github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569
- golang.org/x/crypto v0.7.0
- golang.org/x/oauth2 v0.6.0
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.6.0
+ github.com/prometheus/client_golang v1.23.0
+ github.com/spf13/cobra v1.9.1
+ github.com/stretchr/testify v1.11.0
+ golang.org/x/crypto v0.41.0
+ golang.org/x/mod v0.27.0
+ golang.org/x/oauth2 v0.30.0
+ golang.org/x/sync v0.16.0
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
- gopkg.in/yaml.v3 v3.0.1
- gorm.io/datatypes v1.1.1
- gorm.io/driver/mysql v1.4.7
- gorm.io/driver/sqlite v1.4.4
- gorm.io/gorm v1.24.6
+ gorm.io/datatypes v1.2.6
+ gorm.io/driver/mysql v1.6.0
+ gorm.io/driver/sqlite v1.6.0
+ gorm.io/gorm v1.30.1
)
require (
+ filippo.io/edwards25519 v1.1.0 // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 // indirect
- github.com/frankban/quicktest v1.14.3 // indirect
- github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 // indirect
- github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect
- github.com/go-sql-driver/mysql v1.7.0 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.2 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
+ github.com/go-sql-driver/mysql v1.9.3 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
- github.com/juju/errors v1.0.0 // indirect
- github.com/juju/testing v1.0.2 // indirect
- github.com/juju/webbrowser v1.0.0 // indirect
- github.com/julienschmidt/httprouter v1.3.0 // indirect
- github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
- github.com/kr/fs v0.1.0 // indirect
- github.com/kr/pretty v0.3.1 // indirect
- github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/mattn/go-sqlite3 v1.14.16 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
- github.com/pborman/uuid v1.2.1 // indirect
- github.com/pkg/sftp v1.13.5 // indirect
- github.com/pkg/xattr v0.4.9 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/mattn/go-sqlite3 v1.14.31 // indirect
+ github.com/minio/sio v0.4.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.42.0 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
- github.com/rivo/uniseg v0.4.4 // indirect
- github.com/robfig/cron/v3 v3.0.1 // indirect
- github.com/rogpeppe/fastuuid v1.2.0 // indirect
- github.com/sirupsen/logrus v1.9.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/stretchr/objx v0.5.0 // indirect
- golang.org/x/net v0.8.0 // indirect
- golang.org/x/term v0.6.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.30.0 // indirect
- gopkg.in/errgo.v1 v1.0.1 // indirect
- gopkg.in/httprequest.v1 v1.2.1 // indirect
- gopkg.in/macaroon.v2 v2.1.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.65.0 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/spf13/pflag v1.0.7 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
+ go.mongodb.org/mongo-driver v1.17.4 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel v1.36.0 // indirect
+ go.opentelemetry.io/otel/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.36.0 // indirect
+ golang.org/x/net v0.42.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 4e838a1e..ef3ada85 100644
--- a/go.sum
+++ b/go.sum
@@ -1,12 +1,15 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8=
+github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -16,320 +19,199 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cloudbase/garm-provider-common v0.1.7 h1:V0upTejFRDiyFBO4hhkMWmPtmRTguyOt/4i1u9/rfbg=
+github.com/cloudbase/garm-provider-common v0.1.7/go.mod h1:2O51WbcfqRx5fDHyyJgIFq7KdTZZnefsM+aoOchyleU=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE=
-github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM=
-github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k=
-github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
-github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
-github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
-github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1 h1:uvQJoKTHrFFu8zxoaopNKedRzwdy3+8H72we4T/5cGs=
-github.com/go-macaroon-bakery/macaroon-bakery/v3 v3.0.1/go.mod h1:H59IYeChwvD1po3dhGUPvq5na+4NVD7SJlbhGKvslr0=
-github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE=
-github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc=
-github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
-github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
-github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
-github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
-github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
+github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA=
+github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
+github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE=
-github.com/google/go-github/v48 v48.2.0/go.mod h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM=
+github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413 h1:0Zn/h+BUQg6QHkybGvjFD7BnIbjjz3oWUObacn//1Go=
+github.com/gorilla/websocket v1.5.4-0.20240702125206-a62d9d2a8413/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
-github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=
-github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
-github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=
-github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E=
-github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw=
-github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
+github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
+github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedib0t/go-pretty/v6 v6.6.8 h1:JnnzQeRz2bACBobIaa/r+nqjvws4yEhcmaZ4n1QzsEc=
+github.com/jedib0t/go-pretty/v6 v6.6.8/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/juju/clock v1.0.3 h1:yJHIsWXeU8j3QcBdiess09SzfiXRRrsjKPn2whnMeds=
-github.com/juju/clock v1.0.3/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=
-github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM=
-github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8=
-github.com/juju/loggo v1.0.0 h1:Y6ZMQOGR9Aj3BGkiWx7HBbIx6zNwNkxhVNOHU2i1bl0=
-github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
-github.com/juju/qthttptest v0.1.3 h1:M0HdpwsK/UTHRGRcIw5zvh5z+QOgdqyK+ecDMN+swwM=
-github.com/juju/retry v1.0.0 h1:Tb1hFdDSPGLH/BGdYQOF7utQ9lA0ouVJX2imqgJK6tk=
-github.com/juju/retry v1.0.0/go.mod h1:SssN1eYeK3A2qjnFGTiVMbdzGJ2BfluaJblJXvuvgqA=
-github.com/juju/testing v1.0.2 h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4=
-github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ=
-github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q=
-github.com/juju/webbrowser v1.0.0 h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78=
-github.com/juju/webbrowser v1.0.0/go.mod h1:RwVlbBcF91Q4vS+iwlkJ6bZTE3EwlrjbYlM3WMVD6Bc=
-github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce h1:3zb1HRvOAHOMZ8VGTDEBkKpCUVlF28zalZcb7RFjMnE=
-github.com/lxc/lxd v0.0.0-20230325180147-8d608287b0ce/go.mod h1:JJ1ShHzaOzMzU0B5TNcdI9+vq8Y45ijVeNYxE1wJ8zM=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
-github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
-github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.31 h1:ldt6ghyPJsokUIlksH63gWZkG6qVGeEAu4zLeS4aVZM=
+github.com/mattn/go-sqlite3 v1.14.31/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
+github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
+github.com/minio/sio v0.4.1 h1:EMe3YBC1nf+sRQia65Rutxi+Z554XPV0dt8BIBA+a/0=
+github.com/minio/sio v0.4.1/go.mod h1:oBSjJeGbBdRMZZwna07sX9EFzZy+ywu5aofRiV1g79I=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
-github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
-github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
-github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
-github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
-github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM=
-github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
+github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8=
+github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
-golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
-golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
+go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
-gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=
-gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/httprequest.v1 v1.2.1 h1:pEPLMdF/gjWHnKxLpuCYaHFjc8vAB2wrYjXrqDVC16E=
-gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
-gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=
-gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
-gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/datatypes v1.1.1 h1:XAjO7NNfUKVUvnS3+BkqMrPXxCAcxDlpOYbjnizxNCw=
-gorm.io/datatypes v1.1.1/go.mod h1:u8GEgFjJ+GpsGfgHmBUcQqHm/937t3sj/SO9dvbndTg=
-gorm.io/driver/mysql v1.4.7 h1:rY46lkCspzGHn7+IYsNpSfEv9tA+SU4SkkB+GFX125Y=
-gorm.io/driver/mysql v1.4.7/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc=
-gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc=
-gorm.io/driver/sqlite v1.4.4 h1:gIufGoR0dQzjkyqDyYSCvsYR6fba1Gw5YKDqKeChxFc=
-gorm.io/driver/sqlite v1.4.4/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI=
-gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
-gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
-gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
-gorm.io/gorm v1.24.6 h1:wy98aq9oFEetsc4CAbKD2SoBCdMzsbSIvSUUFJuHi5s=
-gorm.io/gorm v1.24.6/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gorm.io/datatypes v1.2.6 h1:KafLdXvFUhzNeL2ncm03Gl3eTLONQfNKZ+wJ+9Y4Nck=
+gorm.io/datatypes v1.2.6/go.mod h1:M2iO+6S3hhi4nAyYe444Pcb0dcIiOMJ7QHaUXxyiNZY=
+gorm.io/driver/mysql v1.6.0 h1:eNbLmNTpPpTOVZi8MMxCi2aaIm0ZpInbORNXDwyLGvg=
+gorm.io/driver/mysql v1.6.0/go.mod h1:D/oCC2GWK3M/dqoLxnOlaNKmXz8WNTfcS9y5ovaSqKo=
+gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
+gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
+gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
+gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
+gorm.io/driver/sqlserver v1.6.0 h1:VZOBQVsVhkHU/NzNhRJKoANt5pZGQAS1Bwc6m6dgfnc=
+gorm.io/driver/sqlserver v1.6.0/go.mod h1:WQzt4IJo/WHKnckU9jXBLMJIVNMVeTu25dnOzehntWw=
+gorm.io/gorm v1.30.1 h1:lSHg33jJTBxs2mgJRfRZeLDG+WZaHYCk3Wtfl6Ngzo4=
+gorm.io/gorm v1.30.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
diff --git a/internal/testing/mock_watcher.go b/internal/testing/mock_watcher.go
new file mode 100644
index 00000000..112f0de5
--- /dev/null
+++ b/internal/testing/mock_watcher.go
@@ -0,0 +1,66 @@
+//go:build testing
+// +build testing
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package testing
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/database/common"
+)
+
+// MockWatcher is a no-op implementation of the database watcher used by
+// tests built with the "testing" tag. The producers and consumers it hands
+// out never deliver or accept any events.
+type MockWatcher struct{}
+
+// RegisterProducer returns a fresh MockProducer; it never fails.
+func (w *MockWatcher) RegisterProducer(_ context.Context, _ string) (common.Producer, error) {
+ return &MockProducer{}, nil
+}
+
+// RegisterConsumer returns a fresh MockConsumer; the filters are ignored.
+func (w *MockWatcher) RegisterConsumer(_ context.Context, _ string, _ ...common.PayloadFilterFunc) (common.Consumer, error) {
+ return &MockConsumer{}, nil
+}
+
+// Close is a no-op.
+func (w *MockWatcher) Close() {
+}
+
+// MockProducer discards every notification.
+type MockProducer struct{}
+
+// Notify drops the payload and always reports success.
+func (p *MockProducer) Notify(_ common.ChangePayload) error {
+ return nil
+}
+
+// IsClosed always reports false.
+func (p *MockProducer) IsClosed() bool {
+ return false
+}
+
+// Close is a no-op.
+func (p *MockProducer) Close() {
+}
+
+// MockConsumer never yields any payloads.
+type MockConsumer struct{}
+
+// Watch returns a nil channel; receiving from it blocks forever, so callers
+// must select against it rather than receive unconditionally.
+func (c *MockConsumer) Watch() <-chan common.ChangePayload {
+ return nil
+}
+
+// SetFilters is a no-op.
+func (c *MockConsumer) SetFilters(_ ...common.PayloadFilterFunc) {
+}
+
+// Close is a no-op.
+func (c *MockConsumer) Close() {
+}
+
+// IsClosed always reports false.
+func (c *MockConsumer) IsClosed() bool {
+ return false
+}
diff --git a/internal/testing/testing.go b/internal/testing/testing.go
index 754a799a..38725882 100644
--- a/internal/testing/testing.go
+++ b/internal/testing/testing.go
@@ -18,19 +18,180 @@
package testing
import (
+ "context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"sort"
"testing"
- "github.com/cloudbase/garm/config"
-
"github.com/stretchr/testify/require"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
-var (
- encryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
-)
+//nolint:golangci-lint,gosec
+var encryptionPassphrase = "bocyasicgatEtenOubwonIbsudNutDom"
+
+// ImpersonateAdminContext returns ctx populated with the admin user's
+// identity, creating the admin user on first use. Any unexpected database
+// error fails the test immediately via s.Fatalf.
+func ImpersonateAdminContext(ctx context.Context, db common.Store, s *testing.T) context.Context {
+ adminUser, err := db.GetAdminUser(ctx)
+ if err != nil {
+ // Only "not found" is recoverable; anything else is a hard failure.
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get admin user: %v", err)
+ }
+ newUserParams := params.NewUserParams{
+ Email: "admin@localhost",
+ Username: "admin",
+ Password: "superSecretAdminPassword@123",
+ IsAdmin: true,
+ Enabled: true,
+ }
+ adminUser, err = db.CreateUser(ctx, newUserParams)
+ if err != nil {
+ s.Fatalf("failed to create admin user: %v", err)
+ }
+ }
+ // Attach the admin identity to the context so subsequent store calls
+ // pass authorization checks.
+ ctx = auth.PopulateContext(ctx, adminUser, nil)
+ return ctx
+}
+
+// CreateGARMTestUser creates (or fetches, if it already exists) a non-admin
+// test user named username, with a fixed password and a
+// "<username>@localhost" email. Fails the test on any unexpected error.
+func CreateGARMTestUser(ctx context.Context, username string, db common.Store, s *testing.T) params.User {
+ newUserParams := params.NewUserParams{
+ Email: fmt.Sprintf("%s@localhost", username),
+ Username: username,
+ Password: "superSecretPassword@123",
+ IsAdmin: false,
+ Enabled: true,
+ }
+
+ user, err := db.CreateUser(ctx, newUserParams)
+ if err != nil {
+ if errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ // NOTE(review): the lookup below is by username, although the
+ // error message says "by email".
+ user, err = db.GetUser(ctx, newUserParams.Username)
+ if err != nil {
+ s.Fatalf("failed to get user by email: %v", err)
+ }
+ return user
+ }
+ s.Fatalf("failed to create user: %v", err)
+ }
+
+ return user
+}
+
+// CreateGHESEndpoint returns the test GitHub Enterprise Server endpoint
+// (ghes.example.com), creating it if it does not exist yet.
+func CreateGHESEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "ghes.example.com",
+ Description: "GHES endpoint",
+ APIBaseURL: "https://ghes.example.com",
+ UploadBaseURL: "https://upload.ghes.example.com/",
+ BaseURL: "https://ghes.example.com",
+ }
+
+ ep, err := db.GetGithubEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (%s): %v", endpointParams.Name, err)
+ }
+ ep, err = db.CreateGithubEndpoint(ctx, endpointParams)
+ if err != nil {
+ // NOTE(review): when creation races and hits ErrDuplicateEntity,
+ // ep is returned zero-valued; callers may want a re-fetch here.
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (%s): %v", endpointParams.Name, err)
+ }
+ }
+ }
+
+ return ep
+}
+
+// CreateDefaultGithubEndpoint returns the default github.com endpoint,
+// creating it from the appdefaults URLs if it does not exist yet.
+func CreateDefaultGithubEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "github endpoint",
+ APIBaseURL: appdefaults.GithubDefaultBaseURL,
+ UploadBaseURL: appdefaults.GithubDefaultUploadBaseURL,
+ BaseURL: appdefaults.DefaultGithubURL,
+ }
+
+ ep, err := db.GetGithubEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (github.com): %v", err)
+ }
+ ep, err = db.CreateGithubEndpoint(ctx, endpointParams)
+ if err != nil {
+ // NOTE(review): on an ErrDuplicateEntity race, ep stays
+ // zero-valued; callers may want a re-fetch here.
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (github.com): %v", err)
+ }
+ }
+ }
+
+ return ep
+}
+
+// CreateDefaultGiteaEndpoint returns the test Gitea endpoint
+// (gitea.example.com), creating it if it does not exist yet. Safe to call
+// repeatedly and from concurrent tests.
+func CreateDefaultGiteaEndpoint(ctx context.Context, db common.Store, s *testing.T) params.ForgeEndpoint {
+ endpointParams := params.CreateGiteaEndpointParams{
+ Name: "gitea.example.com",
+ Description: "gitea endpoint",
+ APIBaseURL: "https://gitea.example.com/",
+ BaseURL: "https://gitea.example.com/",
+ }
+
+ // Use the Gitea-specific getter; the previous code queried
+ // GetGithubEndpoint by mistake (copy-paste from
+ // CreateDefaultGithubEndpoint) and its error messages claimed
+ // "github.com".
+ ep, err := db.GetGiteaEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.Fatalf("failed to get database object (%s): %v", endpointParams.Name, err)
+ }
+ ep, err = db.CreateGiteaEndpoint(ctx, endpointParams)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+ s.Fatalf("failed to create database object (%s): %v", endpointParams.Name, err)
+ }
+ // Lost the creation race; fetch the endpoint the winner created
+ // instead of returning a zero-valued ForgeEndpoint.
+ ep, err = db.GetGiteaEndpoint(ctx, endpointParams.Name)
+ if err != nil {
+ s.Fatalf("failed to get database object (%s): %v", endpointParams.Name, err)
+ }
+ }
+ }
+
+ return ep
+}
+
+// CreateTestGithubCredentials creates PAT-based GitHub credentials named
+// credsName against the given endpoint, with a dummy token. Fails the test
+// on any error (including duplicates — it does not fetch existing creds).
+func CreateTestGithubCredentials(ctx context.Context, credsName string, db common.Store, s *testing.T, endpoint params.ForgeEndpoint) params.ForgeCredentials {
+ newCredsParams := params.CreateGithubCredentialsParams{
+ Name: credsName,
+ Description: "Test creds",
+ AuthType: params.ForgeAuthTypePAT,
+ Endpoint: endpoint.Name,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-token",
+ },
+ }
+ newCreds, err := db.CreateGithubCredentials(ctx, newCredsParams)
+ if err != nil {
+ s.Fatalf("failed to create database object (%s): %v", credsName, err)
+ }
+ return newCreds
+}
+
+// CreateTestGiteaCredentials creates PAT-based Gitea credentials named
+// credsName against the given endpoint. Note the params struct reuses
+// params.GithubPAT for the token. Fails the test on any error.
+func CreateTestGiteaCredentials(ctx context.Context, credsName string, db common.Store, s *testing.T, endpoint params.ForgeEndpoint) params.ForgeCredentials {
+ newCredsParams := params.CreateGiteaCredentialsParams{
+ Name: credsName,
+ Description: "Test creds",
+ AuthType: params.ForgeAuthTypePAT,
+ Endpoint: endpoint.Name,
+ PAT: params.GithubPAT{
+ OAuth2Token: "test-token",
+ },
+ }
+ newCreds, err := db.CreateGiteaCredentials(ctx, newCredsParams)
+ if err != nil {
+ s.Fatalf("failed to create database object (%s): %v", credsName, err)
+ }
+ return newCreds
+}
func GetTestSqliteDBConfig(t *testing.T) config.Database {
dir, err := os.MkdirTemp("", "garm-config-test")
@@ -58,6 +219,10 @@ type NameAndIDDBEntity interface {
GetName() string
}
+// Ptr returns a pointer to v. Convenient for populating optional pointer
+// fields in test parameter structs from literals.
+func Ptr[T any](v T) *T {
+ return &v
+}
+
func EqualDBEntityByName[T NameAndIDDBEntity](t *testing.T, expected, actual []T) {
require.Equal(t, len(expected), len(actual))
diff --git a/locking/interface.go b/locking/interface.go
new file mode 100644
index 00000000..43ed1737
--- /dev/null
+++ b/locking/interface.go
@@ -0,0 +1,31 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import "time"
+
+// Locker provides named (per-key) locks that also record the identity of the
+// current holder.
+type Locker interface {
+ // TryLock attempts to acquire the lock for key without blocking and
+ // records identifier as the holder on success.
+ TryLock(key, identifier string) bool
+ // Lock blocks until the lock for key is acquired.
+ Lock(key, identifier string)
+ // LockedBy reports the identifier recorded for key, if any.
+ LockedBy(key string) (string, bool)
+ // Unlock releases the lock; when remove is true, the key's state is
+ // also dropped.
+ Unlock(key string, remove bool)
+ // Delete drops all state associated with key.
+ Delete(key string)
+}
+
+// InstanceDeleteBackoff tracks per-key retry backoff for instance deletion.
+type InstanceDeleteBackoff interface {
+ // ShouldProcess reports whether key may be processed now, and the
+ // deadline after which it becomes processable.
+ ShouldProcess(key string) (bool, time.Time)
+ // Delete clears any backoff state for key.
+ Delete(key string)
+ // RecordFailure notes a failed attempt, growing the backoff window.
+ RecordFailure(key string)
+}
diff --git a/locking/local_backoff_locker.go b/locking/local_backoff_locker.go
new file mode 100644
index 00000000..93344566
--- /dev/null
+++ b/locking/local_backoff_locker.go
@@ -0,0 +1,77 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/cloudbase/garm/runner/common"
+)
+
+// NewInstanceDeleteBackoff returns an in-memory, per-key exponential backoff
+// tracker. The context parameter is currently unused.
+func NewInstanceDeleteBackoff(_ context.Context) (InstanceDeleteBackoff, error) {
+ return &instanceDeleteBackoff{}, nil
+}
+
+// instanceBackOff holds the retry state for a single key.
+type instanceBackOff struct {
+ // backoffSeconds is the current wait window; 0 means no backoff yet.
+ backoffSeconds float64
+ // lastRecordedFailureTime is the UTC time of the most recent failure.
+ lastRecordedFailureTime time.Time
+ // mux guards the two fields above.
+ mux sync.Mutex
+}
+
+// instanceDeleteBackoff maps keys to *instanceBackOff entries.
+type instanceDeleteBackoff struct {
+ muxes sync.Map
+}
+
+// ShouldProcess reports whether key may be processed now and the deadline at
+// which it becomes processable (zero time when no backoff applies). The
+// first call for a key implicitly creates an empty backoff entry.
+func (i *instanceDeleteBackoff) ShouldProcess(key string) (bool, time.Time) {
+ backoff, loaded := i.muxes.LoadOrStore(key, &instanceBackOff{})
+ if !loaded {
+ // First sighting of this key: no failures recorded, process away.
+ return true, time.Time{}
+ }
+
+ ib := backoff.(*instanceBackOff)
+ ib.mux.Lock()
+ defer ib.mux.Unlock()
+
+ if ib.lastRecordedFailureTime.IsZero() || ib.backoffSeconds == 0 {
+ return true, time.Time{}
+ }
+
+ // Strict After: at exactly the deadline the key is still not processed.
+ now := time.Now().UTC()
+ deadline := ib.lastRecordedFailureTime.Add(time.Duration(ib.backoffSeconds) * time.Second)
+ return now.After(deadline), deadline
+}
+
+// Delete drops any backoff state recorded for key.
+func (i *instanceDeleteBackoff) Delete(key string) {
+ i.muxes.Delete(key)
+}
+
+// RecordFailure notes a failed attempt for key: the first failure seeds the
+// backoff from the pool reconciliation interval; subsequent failures grow it
+// geometrically (factor 1.5), capped at maxBackoffSeconds.
+func (i *instanceDeleteBackoff) RecordFailure(key string) {
+ backoff, _ := i.muxes.LoadOrStore(key, &instanceBackOff{})
+ ib := backoff.(*instanceBackOff)
+ ib.mux.Lock()
+ defer ib.mux.Unlock()
+
+ ib.lastRecordedFailureTime = time.Now().UTC()
+ if ib.backoffSeconds == 0 {
+ ib.backoffSeconds = common.PoolConsilitationInterval.Seconds()
+ } else {
+ // Geometric progression of 1.5
+ newBackoff := ib.backoffSeconds * 1.5
+ // Cap the backoff to 20 minutes
+ ib.backoffSeconds = min(newBackoff, maxBackoffSeconds)
+ }
+}
diff --git a/locking/local_backoff_locker_test.go b/locking/local_backoff_locker_test.go
new file mode 100644
index 00000000..00fe09c8
--- /dev/null
+++ b/locking/local_backoff_locker_test.go
@@ -0,0 +1,89 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type LockerBackoffTestSuite struct {
+ suite.Suite
+
+ locker *instanceDeleteBackoff
+}
+
+func (l *LockerBackoffTestSuite) SetupTest() {
+ l.locker = &instanceDeleteBackoff{}
+}
+
+func (l *LockerBackoffTestSuite) TearDownTest() {
+ l.locker = nil
+}
+
+func (l *LockerBackoffTestSuite) TestShouldProcess() {
+ shouldProcess, deadline := l.locker.ShouldProcess("test")
+ l.Require().True(shouldProcess)
+ l.Require().Equal(time.Time{}, deadline)
+
+ l.locker.muxes.Store("test", &instanceBackOff{
+ backoffSeconds: 0,
+ lastRecordedFailureTime: time.Time{},
+ })
+
+ shouldProcess, deadline = l.locker.ShouldProcess("test")
+ l.Require().True(shouldProcess)
+ l.Require().Equal(time.Time{}, deadline)
+
+ l.locker.muxes.Store("test", &instanceBackOff{
+ backoffSeconds: 100,
+ lastRecordedFailureTime: time.Now().UTC(),
+ })
+
+ shouldProcess, deadline = l.locker.ShouldProcess("test")
+ l.Require().False(shouldProcess)
+ l.Require().NotEqual(time.Time{}, deadline)
+}
+
+func (l *LockerBackoffTestSuite) TestRecordFailure() {
+ l.locker.RecordFailure("test")
+
+ mux, ok := l.locker.muxes.Load("test")
+ l.Require().True(ok)
+ ib := mux.(*instanceBackOff)
+ l.Require().NotNil(ib)
+ l.Require().NotEqual(time.Time{}, ib.lastRecordedFailureTime)
+ l.Require().Equal(float64(5), ib.backoffSeconds)
+
+ l.locker.RecordFailure("test")
+ mux, ok = l.locker.muxes.Load("test")
+ l.Require().True(ok)
+ ib = mux.(*instanceBackOff)
+ l.Require().NotNil(ib)
+ l.Require().NotEqual(time.Time{}, ib.lastRecordedFailureTime)
+ l.Require().Equal(7.5, ib.backoffSeconds)
+
+ l.locker.Delete("test")
+ mux, ok = l.locker.muxes.Load("test")
+ l.Require().False(ok)
+ l.Require().Nil(mux)
+}
+
+func TestBackoffTestSuite(t *testing.T) {
+ t.Parallel()
+ suite.Run(t, new(LockerBackoffTestSuite))
+}
diff --git a/locking/local_locker.go b/locking/local_locker.go
new file mode 100644
index 00000000..312d85ec
--- /dev/null
+++ b/locking/local_locker.go
@@ -0,0 +1,92 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "context"
+ "sync"
+
+ dbCommon "github.com/cloudbase/garm/database/common"
+)
+
+const (
+ // Ceiling for the delete-backoff window used by the sibling backoff file.
+ maxBackoffSeconds float64 = 1200 // 20 minutes
+)
+
+// NewLocalLocker returns a process-local Locker. The context and store
+// parameters are unused: locks live only in this process's memory.
+func NewLocalLocker(_ context.Context, _ dbCommon.Store) (Locker, error) {
+ return &keyMutex{}, nil
+}
+
+// keyMutex maps keys to *lockWithIdent entries.
+type keyMutex struct {
+ muxes sync.Map
+}
+
+// lockWithIdent pairs a mutex with the identifier of its current holder.
+type lockWithIdent struct {
+ mux sync.Mutex
+ ident string
+}
+
+// Compile-time check that keyMutex satisfies Locker.
+var _ Locker = &keyMutex{}
+
+// TryLock attempts to acquire the lock for key without blocking, creating
+// the entry on first use. The identifier is recorded only on success.
+// NOTE(review): ident is written here without holding any lock around the
+// field itself, while LockedBy reads it lock-free — confirm this race is
+// acceptable for the intended callers.
+func (k *keyMutex) TryLock(key, identifier string) bool {
+ mux, _ := k.muxes.LoadOrStore(key, &lockWithIdent{
+ mux: sync.Mutex{},
+ })
+ keyMux := mux.(*lockWithIdent)
+ locked := keyMux.mux.TryLock()
+ if locked {
+ keyMux.ident = identifier
+ }
+ return locked
+}
+
+// Lock blocks until the mutex associated with key is acquired, then records
+// identifier as the current holder. The entry is created on first use.
+func (k *keyMutex) Lock(key, identifier string) {
+ mux, _ := k.muxes.LoadOrStore(key, &lockWithIdent{
+ mux: sync.Mutex{},
+ })
+ keyMux := mux.(*lockWithIdent)
+ // Acquire the mutex BEFORE recording the holder. The previous order set
+ // ident while another goroutine could still hold the lock, so a blocked
+ // waiter overwrote the real owner's identity and LockedBy misreported
+ // the holder. This also matches TryLock, which records the identifier
+ // only after a successful acquisition.
+ keyMux.mux.Lock()
+ keyMux.ident = identifier
+}
+
+// Unlock releases the lock held for key, clearing the recorded holder first.
+// When remove is true the entry is also dropped from the map (before the
+// mutex is released). Unlocking an unknown key is a silent no-op.
+func (k *keyMutex) Unlock(key string, remove bool) {
+ mux, ok := k.muxes.Load(key)
+ if !ok {
+ return
+ }
+ keyMux := mux.(*lockWithIdent)
+ if remove {
+ k.Delete(key)
+ }
+ keyMux.ident = ""
+ keyMux.mux.Unlock()
+}
+
+// Delete drops the entry for key, regardless of whether it is locked.
+func (k *keyMutex) Delete(key string) {
+ k.muxes.Delete(key)
+}
+
+// LockedBy reports the identifier recorded for key. It returns ("", false)
+// when the key is unknown or no holder is recorded.
+// NOTE(review): ident is read without acquiring the per-key mutex, so the
+// result is best-effort under concurrency.
+func (k *keyMutex) LockedBy(key string) (string, bool) {
+ mux, ok := k.muxes.Load(key)
+ if !ok {
+ return "", false
+ }
+ keyMux := mux.(*lockWithIdent)
+ if keyMux.ident == "" {
+ return "", false
+ }
+
+ return keyMux.ident, true
+}
diff --git a/locking/local_locker_test.go b/locking/local_locker_test.go
new file mode 100644
index 00000000..75b4dac0
--- /dev/null
+++ b/locking/local_locker_test.go
@@ -0,0 +1,241 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package locking
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type LockerTestSuite struct {
+	suite.Suite
+
+	mux *keyMutex // locker under test; also registered as the package-level locker in SetupTest
+}
+
+func (l *LockerTestSuite) SetupTest() { // fresh locker per test, registered globally so the package-level helpers resolve to it
+	l.mux = &keyMutex{}
+	err := RegisterLocker(l.mux)
+	l.Require().NoError(err, "should register the locker")
+}
+
+func (l *LockerTestSuite) TearDownTest() { // clear the package-level locker so the next SetupTest can register again
+	l.mux = nil
+	locker = nil
+}
+
+func (l *LockerTestSuite) TestLocalLockerLockUnlock() { // Lock records the identifier; Unlock(remove=true) drops the map entry
+	l.mux.Lock("test", "test-identifier")
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+	l.mux.Unlock("test", true)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().False(ok)
+	l.Require().Nil(mux)
+	l.mux.Unlock("test", false) // unlocking an already-removed key must be a no-op
+}
+
+func (l *LockerTestSuite) TestLocalLockerTryLock() { // TryLock succeeds when free, fails (and keeps the holder's ident) when taken
+	locked := l.mux.TryLock("test", "test-identifier")
+	l.Require().True(locked)
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+
+	locked = l.mux.TryLock("test", "another-identifier2")
+	l.Require().False(locked)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux = mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident) // failed TryLock must not overwrite the holder's identifier
+
+	l.mux.Unlock("test", true)
+	locked = l.mux.TryLock("test", "another-identifier2")
+	l.Require().True(locked)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux = mux.(*lockWithIdent)
+	l.Require().Equal("another-identifier2", keyMux.ident)
+	l.mux.Unlock("test", true)
+}
+
+func (l *LockerTestSuite) TestLocalLockerLockedBy() { // fixed typo in test name ("Lockert" -> "Locker"); LockedBy reports the holder only while the key is held
+	l.mux.Lock("test", "test-identifier")
+	identifier, ok := l.mux.LockedBy("test")
+	l.Require().True(ok)
+	l.Require().Equal("test-identifier", identifier)
+	l.mux.Unlock("test", true)
+	identifier, ok = l.mux.LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+
+	l.mux.Lock("test", "test-identifier")
+	identifier, ok = l.mux.LockedBy("test")
+	l.Require().True(ok)
+	l.Require().Equal("test-identifier", identifier)
+	l.mux.Unlock("test", false) // keep the map entry; ident is still cleared
+	identifier, ok = l.mux.LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockerPanicsIfNotInitialized() { // every package-level helper must panic when no locker is registered
+	locker = nil
+	l.Require().Panics(
+		func() {
+			Lock("test", "test-identifier")
+		},
+		"Lock should panic if locker is not initialized",
+	)
+
+	l.Require().Panics(
+		func() {
+			TryLock("test", "test-identifier")
+		},
+		"TryLock should panic if locker is not initialized",
+	)
+
+	l.Require().Panics(
+		func() {
+			Unlock("test", false)
+		},
+		"Unlock should panic if locker is not initialized",
+	)
+
+	l.Require().Panics(
+		func() {
+			Delete("test")
+		},
+		"Delete should panic if locker is not initialized",
+	)
+
+	l.Require().Panics(
+		func() {
+			LockedBy("test")
+		},
+		"LockedBy should panic if locker is not initialized",
+	)
+}
+
+func (l *LockerTestSuite) TestLockerAlreadyRegistered() { // RegisterLocker is one-shot; a second call must fail
+	err := RegisterLocker(l.mux)
+	l.Require().Error(err, "should not be able to register the same locker again")
+	l.Require().Equal("locker already registered", err.Error())
+}
+
+func (l *LockerTestSuite) TestLockerDelete() { // Delete drops the entry even while the key is still locked
+	Lock("test", "test-identifier")
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+
+	Delete("test")
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().False(ok)
+	l.Require().Nil(mux)
+
+	identifier, ok := l.mux.LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockUnlock() { // package-level Lock/Unlock delegate to the registered locker
+	Lock("test", "test-identifier")
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+
+	Unlock("test", true)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().False(ok)
+	l.Require().Nil(mux)
+
+	identifier, ok := l.mux.LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestLockUnlockWithoutRemove() { // Unlock(remove=false) keeps the map entry but clears the holder's ident
+	Lock("test", "test-identifier")
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+
+	Unlock("test", false)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux = mux.(*lockWithIdent)
+	l.Require().Equal("", keyMux.ident)
+
+	identifier, ok := l.mux.LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+}
+
+func (l *LockerTestSuite) TestTryLock() { // package-level TryLock mirrors keyMutex.TryLock semantics
+	locked := TryLock("test", "test-identifier")
+	l.Require().True(locked)
+	mux, ok := l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux := mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident)
+
+	locked = TryLock("test", "another-identifier2")
+	l.Require().False(locked)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux = mux.(*lockWithIdent)
+	l.Require().Equal("test-identifier", keyMux.ident) // failed TryLock must not overwrite the holder's identifier
+
+	Unlock("test", true)
+	locked = TryLock("test", "another-identifier2")
+	l.Require().True(locked)
+	mux, ok = l.mux.muxes.Load("test")
+	l.Require().True(ok)
+	keyMux = mux.(*lockWithIdent)
+	l.Require().Equal("another-identifier2", keyMux.ident)
+	Unlock("test", true)
+}
+
+func (l *LockerTestSuite) TestLockedBy() { // package-level LockedBy reports the holder only while the key is held
+	Lock("test", "test-identifier")
+	identifier, ok := LockedBy("test")
+	l.Require().True(ok)
+	l.Require().Equal("test-identifier", identifier)
+	Unlock("test", true)
+	identifier, ok = LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+
+	Lock("test", "test-identifier2")
+	identifier, ok = LockedBy("test")
+	l.Require().True(ok)
+	l.Require().Equal("test-identifier2", identifier)
+	Unlock("test", false) // keep the entry; ident is still cleared
+	identifier, ok = LockedBy("test")
+	l.Require().False(ok)
+	l.Require().Equal("", identifier)
+}
+
+func TestLockerTestSuite(t *testing.T) { // suite entry point; Parallel is safe here since this is the only suite mutating the package-level locker
+	t.Parallel()
+	suite.Run(t, new(LockerTestSuite))
+}
diff --git a/locking/locking.go b/locking/locking.go
new file mode 100644
index 00000000..312d2e6a
--- /dev/null
+++ b/locking/locking.go
@@ -0,0 +1,90 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package locking
+
+import (
+ "fmt"
+ "log/slog"
+ "runtime"
+ "sync"
+)
+
+var locker Locker
+
+var lockerMux = sync.Mutex{}
+
+func TryLock(key, identifier string) (ok bool) { // package-level non-blocking lock; panics if RegisterLocker was never called
+	if locker == nil {
+		panic("no locker is registered")
+	}
+
+	_, filename, line, _ := runtime.Caller(1) // caller's file:line, logged for lock-contention debugging
+	slog.Debug("attempting to try lock", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+	defer slog.Debug("try lock returned", "key", key, "identifier", identifier, "locked", ok, "caller", fmt.Sprintf("%s:%d", filename, line))
+
+	ok = locker.TryLock(key, identifier)
+	return ok
+}
+
+func Lock(key, identifier string) { // package-level blocking lock; panics if RegisterLocker was never called
+	if locker == nil {
+		panic("no locker is registered")
+	}
+
+	_, filename, line, _ := runtime.Caller(1) // caller's file:line, logged for lock-contention debugging
+	slog.Debug("attempting to lock", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+	defer slog.Debug("lock acquired", "key", key, "identifier", identifier, "caller", fmt.Sprintf("%s:%d", filename, line))
+
+	locker.Lock(key, identifier)
+}
+
+func Unlock(key string, remove bool) { // package-level unlock; remove also drops the key from the locker; panics if unregistered
+	if locker == nil {
+		panic("no locker is registered")
+	}
+
+	_, filename, line, _ := runtime.Caller(1) // caller's file:line, logged for lock-contention debugging
+	slog.Debug("attempting to unlock", "key", key, "remove", remove, "caller", fmt.Sprintf("%s:%d", filename, line))
+	defer slog.Debug("unlock completed", "key", key, "remove", remove, "caller", fmt.Sprintf("%s:%d", filename, line))
+	locker.Unlock(key, remove)
+}
+
+func LockedBy(key string) (string, bool) { // package-level holder lookup; panics if no locker is registered
+	if locker == nil {
+		panic("no locker is registered")
+	}
+
+	return locker.LockedBy(key)
+}
+
+func Delete(key string) { // package-level key removal; panics if no locker is registered
+	if locker == nil {
+		panic("no locker is registered")
+	}
+
+	locker.Delete(key)
+}
+
+func RegisterLocker(lock Locker) error { // one-shot registration of the process-wide locker; errors on a second call
+	lockerMux.Lock()
+	defer lockerMux.Unlock()
+
+	if locker != nil {
+		return fmt.Errorf("locker already registered")
+	}
+
+	locker = lock // NOTE(review): other helpers read `locker` without lockerMux — safe only if registration happens before concurrent use; confirm startup ordering
+	return nil
+}
diff --git a/metrics/enterprise.go b/metrics/enterprise.go
new file mode 100644
index 00000000..882b64df
--- /dev/null
+++ b/metrics/enterprise.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// EnterpriseInfo is a constant-1 gauge labeled with enterprise name and ID.
+	EnterpriseInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsEnterpriseSubsystem,
+		Name: "info",
+		Help: "Info of the enterprise",
+	}, []string{"name", "id"})
+
+	// EnterprisePoolManagerStatus reports the pool manager running state per enterprise.
+	EnterprisePoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsEnterpriseSubsystem,
+		Name: "pool_manager_status",
+		Help: "Status of the enterprise pool manager",
+	}, []string{"name", "id", "running"})
+)
diff --git a/metrics/github.go b/metrics/github.go
new file mode 100644
index 00000000..0d6f5fa7
--- /dev/null
+++ b/metrics/github.go
@@ -0,0 +1,33 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	// GithubOperationCount counts all GitHub API operation attempts, by operation and scope.
+	GithubOperationCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsGithubSubsystem,
+		Name: "operations_total",
+		Help: "Total number of github operation attempts",
+	}, []string{"operation", "scope"})
+
+	// GithubOperationFailedCount counts only the failed GitHub API operation attempts.
+	GithubOperationFailedCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsGithubSubsystem,
+		Name: "errors_total",
+		Help: "Total number of failed github operation attempts",
+	}, []string{"operation", "scope"})
+)
diff --git a/metrics/health.go b/metrics/health.go
new file mode 100644
index 00000000..13194231
--- /dev/null
+++ b/metrics/health.go
@@ -0,0 +1,25 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// GarmHealth is a constant-1 gauge exposing controller identity/URLs as labels.
+var GarmHealth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+	Namespace: metricsNamespace,
+	Name: "health",
+	Help: "Health of the garm",
+}, []string{"metadata_url", "callback_url", "webhook_url", "controller_webhook_url", "controller_id"})
diff --git a/metrics/instance.go b/metrics/instance.go
new file mode 100644
index 00000000..b9d7e1cf
--- /dev/null
+++ b/metrics/instance.go
@@ -0,0 +1,42 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// InstanceStatus exposes each runner instance's provisioning/runner state as gauge labels.
+	InstanceStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsRunnerSubsystem,
+		Name: "status",
+		Help: "Status of the instance",
+	}, []string{"name", "status", "runner_status", "pool_owner", "pool_type", "pool_id", "provider"})
+
+	// InstanceOperationCount counts all instance operation attempts, by operation and provider.
+	InstanceOperationCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsRunnerSubsystem,
+		Name: "operations_total",
+		Help: "Total number of instance operation attempts",
+	}, []string{"operation", "provider"})
+
+	// InstanceOperationFailedCount counts only the failed instance operation attempts.
+	InstanceOperationFailedCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsRunnerSubsystem,
+		Name: "errors_total",
+		Help: "Total number of failed instance operation attempts",
+	}, []string{"operation", "provider"})
+)
diff --git a/metrics/metrics.go b/metrics/metrics.go
index 04e218a6..1a566116 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -1,184 +1,82 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package metrics
import (
- "log"
-
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner"
-
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
)
-var webhooksReceived *prometheus.CounterVec = nil
+const (
+ metricsNamespace = "garm"
+ metricsRunnerSubsystem = "runner"
+ metricsPoolSubsystem = "pool"
+ metricsProviderSubsystem = "provider"
+ metricsOrganizationSubsystem = "organization"
+ metricsRepositorySubsystem = "repository"
+ metricsEnterpriseSubsystem = "enterprise"
+ metricsWebhookSubsystem = "webhook"
+ metricsGithubSubsystem = "github"
+)
-// RecordWebhookWithLabels will increment a webhook metric identified by specific
-// values. If metrics are disabled, this function is a noop.
-func RecordWebhookWithLabels(lvs ...string) error {
- if webhooksReceived == nil {
- // not registered. Noop
- return nil
- }
+// RegisterMetrics registers all the metrics
+func RegisterMetrics() error {
+ var collectors []prometheus.Collector
+ collectors = append(collectors,
- counter, err := webhooksReceived.GetMetricWithLabelValues(lvs...)
- if err != nil {
- return errors.Wrap(err, "recording metric")
- }
- counter.Inc()
- return nil
-}
+ // metrics created during the periodically update of the metrics
+ //
+ // runner metrics
+ InstanceStatus,
+ // organization metrics
+ OrganizationInfo,
+ OrganizationPoolManagerStatus,
+ // enterprise metrics
+ EnterpriseInfo,
+ EnterprisePoolManagerStatus,
+ // repository metrics
+ RepositoryInfo,
+ RepositoryPoolManagerStatus,
+ // provider metrics
+ ProviderInfo,
+ // pool metrics
+ PoolInfo,
+ PoolStatus,
+ PoolMaxRunners,
+ PoolMinIdleRunners,
+ PoolBootstrapTimeout,
+ // health metrics
+ GarmHealth,
-func RegisterCollectors(runner *runner.Runner) error {
- if webhooksReceived != nil {
- // Already registered.
- return nil
- }
-
- garmCollector, err := NewGarmCollector(runner)
- if err != nil {
- return errors.Wrap(err, "getting collector")
- }
-
- if err := prometheus.Register(garmCollector); err != nil {
- return errors.Wrap(err, "registering collector")
- }
-
- // metric to count total webhooks received
- // at this point the webhook is not yet authenticated and
- // we don't know if it's meant for us or not
- webhooksReceived = prometheus.NewCounterVec(prometheus.CounterOpts{
- Name: "garm_webhooks_received",
- Help: "The total number of webhooks received",
- }, []string{"valid", "reason", "hostname", "controller_id"})
-
- err = prometheus.Register(webhooksReceived)
- if err != nil {
- return errors.Wrap(err, "registering webhooks recv counter")
- }
- return nil
-}
-
-func NewGarmCollector(r *runner.Runner) (*GarmCollector, error) {
- controllerInfo, err := r.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- return nil, errors.Wrap(err, "fetching controller info")
- }
- return &GarmCollector{
- runner: r,
- instanceMetric: prometheus.NewDesc(
- "garm_runner_status",
- "Status of the runner",
- []string{"name", "status", "runner_status", "pool_owner", "pool_type", "pool_id", "hostname", "controller_id"}, nil,
- ),
- healthMetric: prometheus.NewDesc(
- "garm_health",
- "Health of the runner",
- []string{"hostname", "controller_id"}, nil,
- ),
- cachedControllerInfo: controllerInfo,
- }, nil
-}
-
-type GarmCollector struct {
- healthMetric *prometheus.Desc
- instanceMetric *prometheus.Desc
- runner *runner.Runner
- cachedControllerInfo params.ControllerInfo
-}
-
-func (c *GarmCollector) Describe(ch chan<- *prometheus.Desc) {
- ch <- c.instanceMetric
- ch <- c.healthMetric
-}
-
-func (c *GarmCollector) Collect(ch chan<- prometheus.Metric) {
- controllerInfo, err := c.runner.GetControllerInfo(auth.GetAdminContext())
- if err != nil {
- log.Printf("failed to get controller info: %s", err)
- return
- }
- c.CollectInstanceMetric(ch, controllerInfo.Hostname, controllerInfo.ControllerID.String())
- c.CollectHealthMetric(ch, controllerInfo.Hostname, controllerInfo.ControllerID.String())
-}
-
-func (c *GarmCollector) CollectHealthMetric(ch chan<- prometheus.Metric, hostname string, controllerID string) {
- m, err := prometheus.NewConstMetric(
- c.healthMetric,
- prometheus.GaugeValue,
- 1,
- hostname,
- controllerID,
+ // metrics used within normal garm operations
+ // e.g. count instance creations, count github api calls, ...
+ //
+ // runner instances
+ InstanceOperationCount,
+ InstanceOperationFailedCount,
+ // github
+ GithubOperationCount,
+ GithubOperationFailedCount,
+ // webhook metrics
+ WebhooksReceived,
)
- if err != nil {
- log.Printf("error on creating health metric: %s", err)
- return
- }
- ch <- m
-}
-// CollectInstanceMetric collects the metrics for the runner instances
-// reflecting the statuses and the pool they belong to.
-func (c *GarmCollector) CollectInstanceMetric(ch chan<- prometheus.Metric, hostname string, controllerID string) {
- ctx := auth.GetAdminContext()
-
- instances, err := c.runner.ListAllInstances(ctx)
- if err != nil {
- log.Printf("cannot collect metrics, listing instances: %s", err)
- return
- }
-
- pools, err := c.runner.ListAllPools(ctx)
- if err != nil {
- log.Printf("listing pools: %s", err)
- // continue anyway
- }
-
- type poolInfo struct {
- Name string
- Type string
- }
-
- poolNames := make(map[string]poolInfo)
- for _, pool := range pools {
- if pool.EnterpriseName != "" {
- poolNames[pool.ID] = poolInfo{
- Name: pool.EnterpriseName,
- Type: string(pool.PoolType()),
- }
- } else if pool.OrgName != "" {
- poolNames[pool.ID] = poolInfo{
- Name: pool.OrgName,
- Type: string(pool.PoolType()),
- }
- } else {
- poolNames[pool.ID] = poolInfo{
- Name: pool.RepoName,
- Type: string(pool.PoolType()),
- }
+ for _, c := range collectors {
+ if err := prometheus.Register(c); err != nil {
+ return err
}
}
- for _, instance := range instances {
-
- m, err := prometheus.NewConstMetric(
- c.instanceMetric,
- prometheus.GaugeValue,
- 1,
- instance.Name,
- string(instance.Status),
- string(instance.RunnerStatus),
- poolNames[instance.PoolID].Name,
- poolNames[instance.PoolID].Type,
- instance.PoolID,
- hostname,
- controllerID,
- )
-
- if err != nil {
- log.Printf("cannot collect metrics, creating metric: %s", err)
- continue
- }
- ch <- m
- }
+ return nil
}
diff --git a/metrics/organization.go b/metrics/organization.go
new file mode 100644
index 00000000..d04e7a4e
--- /dev/null
+++ b/metrics/organization.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// OrganizationInfo is a constant-1 gauge labeled with organization name and ID.
+	OrganizationInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsOrganizationSubsystem,
+		Name: "info",
+		Help: "Info of the organization",
+	}, []string{"name", "id"})
+
+	// OrganizationPoolManagerStatus reports the pool manager running state per organization.
+	OrganizationPoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsOrganizationSubsystem,
+		Name: "pool_manager_status",
+		Help: "Status of the organization pool manager",
+	}, []string{"name", "id", "running"})
+)
diff --git a/metrics/pool.go b/metrics/pool.go
new file mode 100644
index 00000000..fc6f2520
--- /dev/null
+++ b/metrics/pool.go
@@ -0,0 +1,56 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// PoolInfo is a constant-1 gauge exposing pool configuration as labels.
+	PoolInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsPoolSubsystem,
+		Name: "info",
+		Help: "Info of the pool",
+	}, []string{"id", "image", "flavor", "prefix", "os_type", "os_arch", "tags", "provider", "pool_owner", "pool_type"})
+
+	// PoolStatus reports whether a pool is enabled.
+	PoolStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsPoolSubsystem,
+		Name: "status",
+		Help: "Status of the pool",
+	}, []string{"id", "enabled"})
+
+	// PoolMaxRunners exposes the configured maximum runner count per pool.
+	PoolMaxRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsPoolSubsystem,
+		Name: "max_runners",
+		Help: "Maximum number of runners in the pool",
+	}, []string{"id"})
+
+	// PoolMinIdleRunners exposes the configured minimum idle runner count per pool.
+	PoolMinIdleRunners = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsPoolSubsystem,
+		Name: "min_idle_runners",
+		Help: "Minimum number of idle runners in the pool",
+	}, []string{"id"})
+
+	// PoolBootstrapTimeout exposes the per-pool runner bootstrap timeout.
+	PoolBootstrapTimeout = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsPoolSubsystem,
+		Name: "bootstrap_timeout",
+		Help: "Runner bootstrap timeout in the pool",
+	}, []string{"id"})
+)
diff --git a/metrics/provider.go b/metrics/provider.go
new file mode 100644
index 00000000..3262ab3b
--- /dev/null
+++ b/metrics/provider.go
@@ -0,0 +1,26 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// ProviderInfo is a constant-1 gauge exposing provider name, type and description as labels.
+var ProviderInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+	Namespace: metricsNamespace,
+	Subsystem: metricsProviderSubsystem,
+	Name: "info",
+	Help: "Info of the provider", // fixed copy-paste: previously read "Info of the organization"
+}, []string{"name", "type", "description"})
diff --git a/metrics/repository.go b/metrics/repository.go
new file mode 100644
index 00000000..21714233
--- /dev/null
+++ b/metrics/repository.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// RepositoryInfo is a constant-1 gauge labeled with repository name and ID.
+	RepositoryInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsRepositorySubsystem,
+		Name: "info",
+		Help: "Info of the repository", // fixed copy-paste: previously read "Info of the enterprise"
+	}, []string{"name", "id"})
+
+	// RepositoryPoolManagerStatus reports the pool manager running state per repository.
+	RepositoryPoolManagerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: metricsNamespace,
+		Subsystem: metricsRepositorySubsystem,
+		Name: "pool_manager_status",
+		Help: "Status of the repository pool manager", // fixed copy-paste: previously said "enterprise"
+	}, []string{"name", "id", "running"})
+)
diff --git a/metrics/util.go b/metrics/util.go
new file mode 100644
index 00000000..d83b4973
--- /dev/null
+++ b/metrics/util.go
@@ -0,0 +1,22 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+func Bool2float64(b bool) float64 { // 1 for true, 0 for false — used to publish booleans as gauge values
+	if !b {
+		return 0
+	}
+	return 1
+}
diff --git a/metrics/webhooks.go b/metrics/webhooks.go
new file mode 100644
index 00000000..48a08f9c
--- /dev/null
+++ b/metrics/webhooks.go
@@ -0,0 +1,24 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// WebhooksReceived counts incoming webhooks, labeled by validity and rejection reason.
+var WebhooksReceived = prometheus.NewCounterVec(prometheus.CounterOpts{
+	Namespace: metricsNamespace,
+	Subsystem: metricsWebhookSubsystem,
+	Name: "received", // NOTE(review): Prometheus convention is a _total suffix for counters; renaming now would break existing dashboards — confirm before changing
+	Help: "The total number of webhooks received",
+}, []string{"valid", "reason"})
diff --git a/params/github.go b/params/github.go
index fc4b1c59..08f7b409 100644
--- a/params/github.go
+++ b/params/github.go
@@ -14,7 +14,16 @@
package params
-import "time"
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "time"
+
+ jwt "github.com/golang-jwt/jwt/v5"
+ "github.com/google/uuid"
+)
type Event string
@@ -22,6 +31,7 @@ const (
// WorkflowJobEvent is the event set in the webhook payload from github
// when a workflow_job hook is sent.
WorkflowJobEvent Event = "workflow_job"
+ PingEvent Event = "ping"
)
// WorkflowJob holds the payload sent by github when a workload_job is sent.
@@ -161,7 +171,9 @@ type WorkflowJob struct {
DefaultBranch string `json:"default_branch"`
} `json:"repository"`
Organization struct {
- Login string `json:"login"`
+ Login string `json:"login"`
+ // Name is a gitea specific field
+ Name string `json:"name"`
ID int64 `json:"id"`
NodeID string `json:"node_id"`
URL string `json:"url"`
@@ -207,3 +219,351 @@ type WorkflowJob struct {
SiteAdmin bool `json:"site_admin"`
} `json:"sender"`
}
+
+// GetOrgName returns the organization name from the webhook payload.
+// Gitea populates the organization "name" field (see the Organization
+// struct above), while GitHub uses "login", so the forge type decides
+// which field is read.
+func (w WorkflowJob) GetOrgName(forgeType EndpointType) string {
+	if forgeType == GiteaEndpointType {
+		return w.Organization.Name
+	}
+	return w.Organization.Login
+}
+
+// RunnerSetting holds per-runner behavior flags exchanged with the runner
+// scale set API.
+type RunnerSetting struct {
+	Ephemeral     bool `json:"ephemeral,omitempty"`
+	IsElastic     bool `json:"isElastic,omitempty"`
+	DisableUpdate bool `json:"disableUpdate,omitempty"`
+}
+
+// Label is a runner label as used by the runner scale set API.
+type Label struct {
+	Type string `json:"type"`
+	Name string `json:"name"`
+}
+
+// RunnerScaleSetStatistic mirrors the job and runner counters the service
+// reports for a runner scale set.
+type RunnerScaleSetStatistic struct {
+	TotalAvailableJobs     int `json:"totalAvailableJobs"`
+	TotalAcquiredJobs      int `json:"totalAcquiredJobs"`
+	TotalAssignedJobs      int `json:"totalAssignedJobs"`
+	TotalRunningJobs       int `json:"totalRunningJobs"`
+	TotalRegisteredRunners int `json:"totalRegisteredRunners"`
+	TotalBusyRunners       int `json:"totalBusyRunners"`
+	TotalIdleRunners       int `json:"totalIdleRunners"`
+}
+
+// RunnerScaleSet models a runner scale set object as returned by the
+// runner scale set API, including the per-scale-set action URLs used by
+// the client.
+type RunnerScaleSet struct {
+	ID              int     `json:"id,omitempty"`
+	Name            string  `json:"name,omitempty"`
+	RunnerGroupID   int64   `json:"runnerGroupId,omitempty"`
+	RunnerGroupName string  `json:"runnerGroupName,omitempty"`
+	Labels          []Label `json:"labels,omitempty"`
+	// NOTE(review): the tag is capitalized "RunnerSetting" unlike the other
+	// camelCase tags — presumably this matches the service's wire format;
+	// confirm against the API before "fixing" it.
+	RunnerSetting        RunnerSetting            `json:"RunnerSetting,omitempty"`
+	CreatedOn            time.Time                `json:"createdOn,omitempty"`
+	RunnerJitConfigURL   string                   `json:"runnerJitConfigUrl,omitempty"`
+	GetAcquirableJobsURL string                   `json:"getAcquirableJobsUrl,omitempty"`
+	AcquireJobsURL       string                   `json:"acquireJobsUrl,omitempty"`
+	Statistics           *RunnerScaleSetStatistic `json:"statistics,omitempty"`
+	// Status is loosely typed on the wire; consumers must type-assert.
+	Status  interface{} `json:"status,omitempty"`
+	Enabled *bool       `json:"enabled,omitempty"`
+}
+
+// RunnerScaleSetsResponse is the paginated list envelope for scale sets.
+type RunnerScaleSetsResponse struct {
+	Count           int              `json:"count"`
+	RunnerScaleSets []RunnerScaleSet `json:"value"`
+}
+
+// ActionsServiceAdminInfoResponse holds the actions service URL and admin
+// token returned when registering with the service.
+type ActionsServiceAdminInfoResponse struct {
+	URL   string `json:"url,omitempty"`
+	Token string `json:"token,omitempty"`
+}
+
+// GetURL parses and returns the actions service URL. It returns an error
+// when the URL is empty or cannot be parsed as a request URI.
+func (a ActionsServiceAdminInfoResponse) GetURL() (*url.URL, error) {
+	if a.URL == "" {
+		return nil, fmt.Errorf("no url specified")
+	}
+	u, err := url.ParseRequestURI(a.URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse URL: %w", err)
+	}
+	return u, nil
+}
+
+// getJWT parses the admin token as a JWT without verifying its signature,
+// exposing its registered claims (notably the expiration time).
+func (a ActionsServiceAdminInfoResponse) getJWT() (*jwt.Token, error) {
+	// We're parsing a token we got from the GitHub API. We can't verify its signature.
+	// We do need the expiration date however, or other info.
+	token, _, err := jwt.NewParser().ParseUnverified(a.Token, &jwt.RegisteredClaims{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse jwt token: %w", err)
+	}
+	return token, nil
+}
+
+// ExiresAt returns the expiration time embedded in the token's JWT claims.
+// NOTE(review): the name is a typo for "ExpiresAt"; it is exported, so
+// renaming it would break callers — fix repo-wide in a dedicated change.
+func (a ActionsServiceAdminInfoResponse) ExiresAt() (time.Time, error) {
+	jwt, err := a.getJWT()
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to decode jwt token: %w", err)
+	}
+	expiration, err := jwt.Claims.GetExpirationTime()
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to get expiration time: %w", err)
+	}
+
+	return expiration.Time, nil
+}
+
+// IsExpired reports whether the token has already expired. If the
+// expiration cannot be determined, the token is treated as expired.
+func (a ActionsServiceAdminInfoResponse) IsExpired() bool {
+	if exp, err := a.ExiresAt(); err == nil {
+		return time.Now().UTC().After(exp)
+	}
+	return true
+}
+
+// TimeRemaining returns the duration until the token expires. The result
+// is negative if the token has already expired.
+func (a ActionsServiceAdminInfoResponse) TimeRemaining() (time.Duration, error) {
+	exp, err := a.ExiresAt()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get expiration: %w", err)
+	}
+	now := time.Now().UTC()
+	return exp.Sub(now), nil
+}
+
+// ExpiresIn reports whether the token expires within t. It errs on the
+// safe side and returns true when the remaining time cannot be computed.
+func (a ActionsServiceAdminInfoResponse) ExpiresIn(t time.Duration) bool {
+	remaining, err := a.TimeRemaining()
+	if err != nil {
+		return true
+	}
+	return remaining <= t
+}
+
+// ActionsServiceAdminInfoRequest is the payload sent when requesting admin
+// info from the actions service.
+type ActionsServiceAdminInfoRequest struct {
+	URL         string `json:"url,omitempty"`
+	RunnerEvent string `json:"runner_event,omitempty"`
+}
+
+// RunnerScaleSetSession represents a message-queue session created for a
+// runner scale set; the access token gates reads from MessageQueueURL.
+type RunnerScaleSetSession struct {
+	SessionID               *uuid.UUID               `json:"sessionId,omitempty"`
+	OwnerName               string                   `json:"ownerName,omitempty"`
+	RunnerScaleSet          *RunnerScaleSet          `json:"runnerScaleSet,omitempty"`
+	MessageQueueURL         string                   `json:"messageQueueUrl,omitempty"`
+	MessageQueueAccessToken string                   `json:"messageQueueAccessToken,omitempty"`
+	Statistics              *RunnerScaleSetStatistic `json:"statistics,omitempty"`
+}
+
+// GetURL parses and returns the message queue URL. It returns an error
+// when the URL is empty or cannot be parsed as a request URI.
+// NOTE(review): the GetURL/getJWT/ExiresAt/IsExpired/TimeRemaining/ExpiresIn
+// set below duplicates the ActionsServiceAdminInfoResponse helpers above;
+// a shared helper over the raw token string would remove the duplication.
+func (a RunnerScaleSetSession) GetURL() (*url.URL, error) {
+	if a.MessageQueueURL == "" {
+		return nil, fmt.Errorf("no url specified")
+	}
+	u, err := url.ParseRequestURI(a.MessageQueueURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse URL: %w", err)
+	}
+	return u, nil
+}
+
+// getJWT parses the message queue access token as a JWT without verifying
+// its signature, exposing its registered claims.
+func (a RunnerScaleSetSession) getJWT() (*jwt.Token, error) {
+	// We're parsing a token we got from the GitHub API. We can't verify its signature.
+	// We do need the expiration date however, or other info.
+	token, _, err := jwt.NewParser().ParseUnverified(a.MessageQueueAccessToken, &jwt.RegisteredClaims{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse jwt token: %w", err)
+	}
+	return token, nil
+}
+
+// ExiresAt returns the expiration time embedded in the access token's JWT
+// claims. NOTE(review): typo for "ExpiresAt"; exported, so renaming would
+// break callers — fix repo-wide in a dedicated change.
+func (a RunnerScaleSetSession) ExiresAt() (time.Time, error) {
+	jwt, err := a.getJWT()
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to decode jwt token: %w", err)
+	}
+	expiration, err := jwt.Claims.GetExpirationTime()
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to get expiration time: %w", err)
+	}
+
+	return expiration.Time, nil
+}
+
+// IsExpired reports whether the access token has already expired. If the
+// expiration cannot be determined, the token is treated as expired.
+func (a RunnerScaleSetSession) IsExpired() bool {
+	if exp, err := a.ExiresAt(); err == nil {
+		return time.Now().UTC().After(exp)
+	}
+	return true
+}
+
+// TimeRemaining returns the duration until the access token expires. The
+// result is negative if the token has already expired.
+func (a RunnerScaleSetSession) TimeRemaining() (time.Duration, error) {
+	exp, err := a.ExiresAt()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get expiration: %w", err)
+	}
+	now := time.Now().UTC()
+	return exp.Sub(now), nil
+}
+
+// ExpiresIn reports whether the access token expires within t. It errs on
+// the safe side and returns true when the remaining time cannot be computed.
+func (a RunnerScaleSetSession) ExpiresIn(t time.Duration) bool {
+	remaining, err := a.TimeRemaining()
+	if err != nil {
+		return true
+	}
+	return remaining <= t
+}
+
+// RunnerScaleSetMessage is a single message read from the scale set
+// message queue. Body is a raw JSON string whose content depends on
+// MessageType.
+type RunnerScaleSetMessage struct {
+	MessageID   int64                    `json:"messageId"`
+	MessageType string                   `json:"messageType"`
+	Body        string                   `json:"body"`
+	Statistics  *RunnerScaleSetStatistic `json:"statistics"`
+}
+
+// IsNil reports whether the message is the zero value (no ID, type, body
+// or statistics), i.e. nothing was actually received.
+func (r RunnerScaleSetMessage) IsNil() bool {
+	return r.MessageID == 0 && r.MessageType == "" && r.Body == "" && r.Statistics == nil
+}
+
+// GetJobsFromBody decodes the message body as a JSON array of
+// ScaleSetJobMessage. It returns an error when the body is empty or is not
+// valid JSON for that shape.
+func (r RunnerScaleSetMessage) GetJobsFromBody() ([]ScaleSetJobMessage, error) {
+	var body []ScaleSetJobMessage
+	if r.Body == "" {
+		return nil, fmt.Errorf("no body specified")
+	}
+	if err := json.Unmarshal([]byte(r.Body), &body); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal body: %w", err)
+	}
+	return body, nil
+}
+
+// RunnerReference describes a runner registered with a runner scale set.
+// CreatedOn and Status are loosely typed (any) because the wire format is
+// not guaranteed to be a single type; see GetStatus for normalization.
+type RunnerReference struct {
+	ID               int64  `json:"id"`
+	Name             string `json:"name"`
+	OS               string `json:"os"`
+	RunnerScaleSetID int    `json:"runnerScaleSetId"`
+	CreatedOn        any    `json:"createdOn"`
+	RunnerGroupID    uint64 `json:"runnerGroupId"`
+	RunnerGroupName  string `json:"runnerGroupName"`
+	Version          string `json:"version"`
+	Enabled          bool   `json:"enabled"`
+	Ephemeral        bool   `json:"ephemeral"`
+	Status           any    `json:"status"`
+	DisableUpdate    bool   `json:"disableUpdate"`
+	ProvisioningState string `json:"provisioningState"`
+	Busy             bool   `json:"busy"`
+	Labels           []Label `json:"labels,omitempty"`
+}
+
+// GetStatus normalizes the loosely-typed Status field into a RunnerStatus.
+// Non-string or unrecognized values map to RunnerUnknown. An "online"
+// runner is refined using the Busy flag: busy maps to RunnerActive,
+// otherwise RunnerIdle. Any other valid status is returned unchanged.
+func (r RunnerReference) GetStatus() RunnerStatus {
+	status, ok := r.Status.(string)
+	if !ok {
+		return RunnerUnknown
+	}
+	runnerStatus := RunnerStatus(status)
+	if !runnerStatus.IsValid() {
+		return RunnerUnknown
+	}
+
+	if runnerStatus == RunnerOnline {
+		if r.Busy {
+			return RunnerActive
+		}
+		return RunnerIdle
+	}
+	return runnerStatus
+}
+
+// RunnerScaleSetJitRunnerConfig is the response to a JIT runner
+// registration: the registered runner plus its base64-encoded JIT config.
+type RunnerScaleSetJitRunnerConfig struct {
+	Runner           *RunnerReference `json:"runner"`
+	EncodedJITConfig string           `json:"encodedJITConfig"`
+}
+
+// DecodedJITConfig base64-decodes EncodedJITConfig and unmarshals the
+// result as a JSON object of string keys to string values. It returns an
+// error when the field is empty, not valid base64 (standard encoding), or
+// not JSON of that shape.
+func (r RunnerScaleSetJitRunnerConfig) DecodedJITConfig() (map[string]string, error) {
+	if r.EncodedJITConfig == "" {
+		return nil, fmt.Errorf("no encoded JIT config specified")
+	}
+	decoded, err := base64.StdEncoding.DecodeString(r.EncodedJITConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode JIT config: %w", err)
+	}
+	jitConfig := make(map[string]string)
+	if err := json.Unmarshal(decoded, &jitConfig); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal JIT config: %w", err)
+	}
+	return jitConfig, nil
+}
+
+// RunnerReferenceList is the list envelope for runner references.
+type RunnerReferenceList struct {
+	Count            int               `json:"count"`
+	RunnerReferences []RunnerReference `json:"value"`
+}
+
+// AcquirableJobList is the list envelope for acquirable jobs.
+type AcquirableJobList struct {
+	Count int            `json:"count"`
+	Jobs  []AcquirableJob `json:"value"`
+}
+
+// AcquirableJob describes a queued job that a runner scale set may acquire
+// via AcquireJobURL.
+type AcquirableJob struct {
+	AcquireJobURL string `json:"acquireJobUrl"`
+	MessageType   string `json:"messageType"`
+	// RunnerRequestID identifies the runner request for this job. The JSON
+	// tag was previously the garbled string "run0ne00rRequestId", which can
+	// never match the wire field and left this always zero; it must be
+	// "runnerRequestId", consistent with ScaleSetJobMessage.RunnerRequestID.
+	RunnerRequestID int64    `json:"runnerRequestId"`
+	RepositoryName  string   `json:"repositoryName"`
+	OwnerName       string   `json:"ownerName"`
+	JobWorkflowRef  string   `json:"jobWorkflowRef"`
+	EventName       string   `json:"eventName"`
+	RequestLabels   []string `json:"requestLabels"`
+}
+
+// RunnerGroup is a runner group as returned by the scale set API.
+type RunnerGroup struct {
+	ID        int64  `json:"id"`
+	Name      string `json:"name"`
+	Size      int64  `json:"size"`
+	IsDefault bool   `json:"isDefaultGroup"`
+}
+
+// RunnerGroupList is the list envelope for runner groups.
+type RunnerGroupList struct {
+	Count        int           `json:"count"`
+	RunnerGroups []RunnerGroup `json:"value"`
+}
+
+// ScaleSetJobMessage is a job lifecycle message (assigned/started/completed)
+// delivered on the scale set message queue; see MessageTypeToStatus and
+// ToJob for how it maps onto GARM's job model.
+// NOTE: "omitempty" has no effect on struct-typed fields such as time.Time
+// (encoding/json only omits empty basic/collection values), so the time
+// fields below are always serialized.
+type ScaleSetJobMessage struct {
+	MessageType        string    `json:"messageType,omitempty"`
+	JobID              string    `json:"jobId,omitempty"`
+	RunnerRequestID    int64     `json:"runnerRequestId,omitempty"`
+	RepositoryName     string    `json:"repositoryName,omitempty"`
+	OwnerName          string    `json:"ownerName,omitempty"`
+	JobWorkflowRef     string    `json:"jobWorkflowRef,omitempty"`
+	JobDisplayName     string    `json:"jobDisplayName,omitempty"`
+	WorkflowRunID      int64     `json:"workflowRunId,omitempty"`
+	EventName          string    `json:"eventName,omitempty"`
+	RequestLabels      []string  `json:"requestLabels,omitempty"`
+	QueueTime          time.Time `json:"queueTime,omitempty"`
+	ScaleSetAssignTime time.Time `json:"scaleSetAssignTime,omitempty"`
+	RunnerAssignTime   time.Time `json:"runnerAssignTime,omitempty"`
+	FinishTime         time.Time `json:"finishTime,omitempty"`
+	Result             string    `json:"result,omitempty"`
+	RunnerID           int64     `json:"runnerId,omitempty"`
+	RunnerName         string    `json:"runnerName,omitempty"`
+	AcquireJobURL      string    `json:"acquireJobUrl,omitempty"`
+}
+
+// MessageTypeToStatus maps the queue message type onto a GARM JobStatus:
+// JobAssigned -> queued, JobStarted -> in_progress, JobCompleted ->
+// completed. Any other type — including MessageTypeJobAvailable — falls
+// through to queued.
+func (s ScaleSetJobMessage) MessageTypeToStatus() JobStatus {
+	switch s.MessageType {
+	case MessageTypeJobAssigned:
+		return JobStatusQueued
+	case MessageTypeJobStarted:
+		return JobStatusInProgress
+	case MessageTypeJobCompleted:
+		return JobStatusCompleted
+	default:
+		return JobStatusQueued
+	}
+}
+
+// ToJob converts the queue message into GARM's Job model. Status is derived
+// from the message type via MessageTypeToStatus; StartedAt uses the runner
+// assignment time and CompletedAt the finish time from the message.
+func (s ScaleSetJobMessage) ToJob() Job {
+	return Job{
+		ScaleSetJobID:   s.JobID,
+		Action:          s.EventName,
+		RunID:           s.WorkflowRunID,
+		Status:          string(s.MessageTypeToStatus()),
+		Conclusion:      s.Result,
+		CompletedAt:     s.FinishTime,
+		StartedAt:       s.RunnerAssignTime,
+		Name:            s.JobDisplayName,
+		GithubRunnerID:  s.RunnerID,
+		RunnerName:      s.RunnerName,
+		RepositoryName:  s.RepositoryName,
+		RepositoryOwner: s.OwnerName,
+		Labels:          s.RequestLabels,
+	}
+}
diff --git a/params/interfaces.go b/params/interfaces.go
new file mode 100644
index 00000000..31ef635f
--- /dev/null
+++ b/params/interfaces.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package params
+
+import "time"
+
+// EntityGetter is implemented by all github entities (repositories, organizations and enterprises).
+// It defines the GetEntity() function which returns a github entity.
+type EntityGetter interface {
+	GetEntity() (ForgeEntity, error)
+}
+
+// IDGetter is implemented by models that expose a numeric database ID.
+type IDGetter interface {
+	GetID() uint
+}
+
+// CreationDateGetter is implemented by models that expose their creation
+// timestamp.
+type CreationDateGetter interface {
+	GetCreatedAt() time.Time
+}
+
+// ForgeCredentialsGetter is implemented by models that carry forge
+// credentials.
+type ForgeCredentialsGetter interface {
+	GetForgeCredentials() ForgeCredentials
+}
diff --git a/params/params.go b/params/params.go
index b93952fb..1acd95e1 100644
--- a/params/params.go
+++ b/params/params.go
@@ -15,24 +15,73 @@
package params
import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
"encoding/json"
+ "encoding/pem"
+ "fmt"
+ "math"
+ "net"
+ "net/http"
"time"
- "github.com/cloudbase/garm/runner/providers/common"
- "github.com/cloudbase/garm/util/appdefaults"
+ "github.com/bradleyfalzon/ghinstallation/v2"
+ "github.com/google/go-github/v72/github"
+ "github.com/google/uuid"
+ "golang.org/x/oauth2"
- "github.com/google/go-github/v48/github"
- uuid "github.com/satori/go.uuid"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/util/appdefaults"
)
type (
- PoolType string
- AddressType string
- EventType string
- EventLevel string
- OSType string
- OSArch string
- ProviderType string
+ ForgeEntityType string
+ EventType string
+ EventLevel string
+ ProviderType string
+ JobStatus string
+ RunnerStatus string
+ WebhookEndpointType string
+ ForgeAuthType string
+ EndpointType string
+ PoolBalancerType string
+ ScaleSetState string
+ ScaleSetMessageType string
+)
+
+// IsValid reports whether s is one of the known RunnerStatus values.
+func (s RunnerStatus) IsValid() bool {
+	switch s {
+	case RunnerIdle, RunnerPending, RunnerTerminated,
+		RunnerInstalling, RunnerFailed,
+		RunnerActive, RunnerOffline,
+		RunnerUnknown, RunnerOnline:
+
+		return true
+	}
+	return false
+}
+
+const (
+ // PoolBalancerTypeRoundRobin will try to cycle through the pools of an entity
+ // in a round robin fashion. For example, if a repository has multiple pools that
+ // match a certain set of labels, and the entity is configured to use round robin
+ // balancer, the pool manager will attempt to create instances in each pool in turn
+ // for each job that needs to be serviced. So job1 in pool1, job2 in pool2 and so on.
+ PoolBalancerTypeRoundRobin PoolBalancerType = "roundrobin"
+ // PoolBalancerTypePack will try to create instances in the first pool that matches
+ // the required labels. If the pool is full, it will move on to the next pool and so on.
+ PoolBalancerTypePack PoolBalancerType = "pack"
+ // PoolBalancerTypeNone denotes to the default behavior of the pool manager, which is
+ // to use the round robin balancer.
+ PoolBalancerTypeNone PoolBalancerType = ""
+)
+
+const (
+ AutoEndpointType EndpointType = ""
+ GithubEndpointType EndpointType = "github"
+ GiteaEndpointType EndpointType = "gitea"
)
const (
@@ -43,14 +92,31 @@ const (
)
const (
- RepositoryPool PoolType = "repository"
- OrganizationPool PoolType = "organization"
- EnterprisePool PoolType = "enterprise"
+ // WebhookEndpointDirect instructs garm that it should attempt to create a webhook
+ // in the target entity, using the callback URL defined in the config as a target.
+ WebhookEndpointDirect WebhookEndpointType = "direct"
+ // WebhookEndpointTunnel instructs garm that it should attempt to create a webhook
+ // in the target entity, using the tunnel URL as a base for the webhook URL.
+ // This is defined for future use.
+ WebhookEndpointTunnel WebhookEndpointType = "tunnel"
)
const (
- PublicAddress AddressType = "public"
- PrivateAddress AddressType = "private"
+ JobStatusQueued JobStatus = "queued"
+ JobStatusInProgress JobStatus = "in_progress"
+ JobStatusCompleted JobStatus = "completed"
+)
+
+const (
+ ForgeEntityTypeRepository ForgeEntityType = "repository"
+ ForgeEntityTypeOrganization ForgeEntityType = "organization"
+ ForgeEntityTypeEnterprise ForgeEntityType = "enterprise"
+)
+
+const (
+ MetricsLabelEnterpriseScope = "Enterprise"
+ MetricsLabelRepositoryScope = "Repository"
+ MetricsLabelOrganizationScope = "Organization"
)
const (
@@ -65,30 +131,66 @@ const (
)
const (
- Windows OSType = "windows"
- Linux OSType = "linux"
- Unknown OSType = "unknown"
+ RunnerIdle RunnerStatus = "idle"
+ RunnerPending RunnerStatus = "pending"
+ RunnerTerminated RunnerStatus = "terminated"
+ RunnerInstalling RunnerStatus = "installing"
+ RunnerFailed RunnerStatus = "failed"
+ RunnerActive RunnerStatus = "active"
+ RunnerOffline RunnerStatus = "offline"
+ RunnerOnline RunnerStatus = "online"
+ RunnerUnknown RunnerStatus = "unknown"
)
const (
- Amd64 OSArch = "amd64"
- I386 OSArch = "i386"
- Arm64 OSArch = "arm64"
- Arm OSArch = "arm"
+ // ForgeAuthTypePAT is the OAuth token based authentication
+ ForgeAuthTypePAT ForgeAuthType = "pat"
+ // ForgeAuthTypeApp is the GitHub App based authentication
+ ForgeAuthTypeApp ForgeAuthType = "app"
)
-type Address struct {
- Address string `json:"address"`
- Type AddressType `json:"type"`
+func (e ForgeEntityType) String() string {
+ return string(e)
}
+const (
+ ScaleSetPendingCreate ScaleSetState = "pending_create"
+ ScaleSetCreated ScaleSetState = "created"
+ ScaleSetError ScaleSetState = "error"
+ ScaleSetPendingDelete ScaleSetState = "pending_delete"
+ ScaleSetPendingForceDelete ScaleSetState = "pending_force_delete"
+)
+
+const (
+ MessageTypeRunnerScaleSetJobMessages ScaleSetMessageType = "RunnerScaleSetJobMessages"
+)
+
+const (
+ MessageTypeJobAssigned = "JobAssigned"
+ MessageTypeJobCompleted = "JobCompleted"
+ MessageTypeJobStarted = "JobStarted"
+ MessageTypeJobAvailable = "JobAvailable"
+)
+
+// swagger:model StatusMessage
type StatusMessage struct {
- CreatedAt time.Time `json:"created_at"`
- Message string `json:"message"`
- EventType EventType `json:"event_type"`
- EventLevel EventLevel `json:"event_level"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ Message string `json:"message,omitempty"`
+ EventType EventType `json:"event_type,omitempty"`
+ EventLevel EventLevel `json:"event_level,omitempty"`
}
+// swagger:model EntityEvent
+type EntityEvent struct {
+ ID uint `json:"id,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+
+ EventType EventType `json:"event_type,omitempty"`
+ EventLevel EventLevel `json:"event_level,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// swagger:model Instance
type Instance struct {
// ID is the database ID of this instance.
ID string `json:"id,omitempty"`
@@ -98,8 +200,12 @@ type Instance struct {
// instance in the provider.
ProviderID string `json:"provider_id,omitempty"`
+ // ProviderName is the name of the IaaS where the instance was
+ // created.
+ ProviderName string `json:"provider_name"`
+
// AgentID is the github runner agent ID.
- AgentID int64 `json:"agent_id"`
+ AgentID int64 `json:"agent_id,omitempty"`
// Name is the name associated with an instance. Depending on
// the provider, this may or may not be useful in the context of
@@ -109,7 +215,7 @@ type Instance struct {
// OSType is the operating system type. For now, only Linux and
// Windows are supported.
- OSType OSType `json:"os_type,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
// OSName is the name of the OS. Eg: ubuntu, centos, etc.
OSName string `json:"os_name,omitempty"`
@@ -118,21 +224,24 @@ type Instance struct {
OSVersion string `json:"os_version,omitempty"`
// OSArch is the operating system architecture.
- OSArch OSArch `json:"os_arch,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
// Addresses is a list of IP addresses the provider reports
// for this instance.
- Addresses []Address `json:"addresses,omitempty"`
+ Addresses []commonParams.Address `json:"addresses,omitempty"`
// Status is the status of the instance inside the provider (eg: running, stopped, etc)
- Status common.InstanceStatus `json:"status,omitempty"`
+ Status commonParams.InstanceStatus `json:"status,omitempty"`
// RunnerStatus is the github runner status as it appears on GitHub.
- RunnerStatus common.RunnerStatus `json:"runner_status,omitempty"`
+ RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
// PoolID is the ID of the garm pool to which a runner belongs.
PoolID string `json:"pool_id,omitempty"`
+ // ScaleSetID is the ID of the scale set to which a runner belongs.
+ ScaleSetID uint `json:"scale_set_id,omitempty"`
+
// ProviderFault holds any error messages captured from the IaaS provider that is
// responsible for managing the lifecycle of the runner.
ProviderFault []byte `json:"provider_fault,omitempty"`
@@ -141,18 +250,30 @@ type Instance struct {
// up.
StatusMessages []StatusMessage `json:"status_messages,omitempty"`
+ // CreatedAt is the timestamp of the creation of this runner.
+ CreatedAt time.Time `json:"created_at,omitempty"`
+
// UpdatedAt is the timestamp of the last update to this runner.
- UpdatedAt time.Time `json:"updated_at"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
// GithubRunnerGroup is the github runner group to which the runner belongs.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+ // Job is the current job that is being serviced by this runner.
+ Job *Job `json:"job,omitempty"`
// Do not serialize sensitive info.
- CallbackURL string `json:"-"`
- MetadataURL string `json:"-"`
- CreateAttempt int `json:"-"`
- TokenFetched bool `json:"-"`
+ CallbackURL string `json:"-"`
+ MetadataURL string `json:"-"`
+ CreateAttempt int `json:"-"`
+ TokenFetched bool `json:"-"`
+ AditionalLabels []string `json:"-"`
+ JitConfiguration map[string]string `json:"-"`
+}
+
+func (i Instance) GetCreatedAt() time.Time {
+ return i.CreatedAt
}
func (i Instance) GetName() string {
@@ -163,22 +284,26 @@ func (i Instance) GetID() string {
return i.ID
}
+// used by swagger client generated code
+// swagger:model Instances
+type Instances []Instance
+
type BootstrapInstance struct {
- Name string `json:"name"`
- Tools []*github.RunnerApplicationDownload `json:"tools"`
+ Name string `json:"name,omitempty"`
+ Tools []*github.RunnerApplicationDownload `json:"tools,omitempty"`
// RepoURL is the URL the github runner agent needs to configure itself.
- RepoURL string `json:"repo_url"`
+ RepoURL string `json:"repo_url,omitempty"`
// CallbackUrl is the URL where the instance can send a post, signaling
// progress or status.
- CallbackURL string `json:"callback-url"`
+ CallbackURL string `json:"callback-url,omitempty"`
// MetadataURL is the URL where instances can fetch information needed to set themselves up.
- MetadataURL string `json:"metadata-url"`
+ MetadataURL string `json:"metadata-url,omitempty"`
// InstanceToken is the token that needs to be set by the instance in the headers
// in order to send updated back to the garm via CallbackURL.
- InstanceToken string `json:"instance-token"`
+ InstanceToken string `json:"instance-token,omitempty"`
// SSHKeys are the ssh public keys we may want to inject inside the runners, if the
// provider supports it.
- SSHKeys []string `json:"ssh-keys"`
+ SSHKeys []string `json:"ssh-keys,omitempty"`
// ExtraSpecs is an opaque raw json that gets sent to the provider
// as part of the bootstrap params for instances. It can contain
// any kind of data needed by providers. The contents of this field means
@@ -189,69 +314,78 @@ type BootstrapInstance struct {
// GitHubRunnerGroup is the github runner group in which the newly installed runner
// should be added to. The runner group must be created by someone with access to the
// enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
// CACertBundle is a CA certificate bundle which will be sent to instances and which
// will tipically be installed as a system wide trusted root CA. by either cloud-init
// or whatever mechanism the provider will use to set up the runner.
- CACertBundle []byte `json:"ca-cert-bundle"`
+ CACertBundle []byte `json:"ca-cert-bundle,omitempty"`
// OSArch is the target OS CPU architecture of the runner.
- OSArch OSArch `json:"arch"`
+ OSArch commonParams.OSArch `json:"arch,omitempty"`
// OSType is the target OS platform of the runner (windows, linux).
- OSType OSType `json:"os_type"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
// Flavor is the platform specific abstraction that defines what resources will be allocated
// to the runner (CPU, RAM, disk space, etc). This field is meaningful to the provider which
// handles the actual creation.
- Flavor string `json:"flavor"`
+ Flavor string `json:"flavor,omitempty"`
// Image is the platform specific identifier of the operating system template that will be used
// to spin up a new machine.
- Image string `json:"image"`
+ Image string `json:"image,omitempty"`
// Labels are a list of github runner labels that will be added to the runner.
- Labels []string `json:"labels"`
+ Labels []string `json:"labels,omitempty"`
// PoolID is the ID of the garm pool to which this runner belongs.
- PoolID string `json:"pool_id"`
+ PoolID string `json:"pool_id,omitempty"`
// UserDataOptions are the options for the user data generation.
- UserDataOptions UserDataOptions `json:"user_data_options"`
+ UserDataOptions UserDataOptions `json:"user_data_options,omitempty"`
}
type UserDataOptions struct {
- DisableUpdatesOnBoot bool `json:"disable_updates_on_boot"`
- ExtraPackages []string `json:"extra_packages"`
+ DisableUpdatesOnBoot bool `json:"disable_updates_on_boot,omitempty"`
+ ExtraPackages []string `json:"extra_packages,omitempty"`
}
type Tag struct {
- ID string `json:"id"`
- Name string `json:"name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
}
+// swagger:model Pool
type Pool struct {
RunnerPrefix
- ID string `json:"id"`
- ProviderName string `json:"provider_name"`
- MaxRunners uint `json:"max_runners"`
- MinIdleRunners uint `json:"min_idle_runners"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType OSType `json:"os_type"`
- OSArch OSArch `json:"os_arch"`
- Tags []Tag `json:"tags"`
- Enabled bool `json:"enabled"`
- Instances []Instance `json:"instances"`
- RepoID string `json:"repo_id,omitempty"`
- RepoName string `json:"repo_name,omitempty"`
- OrgID string `json:"org_id,omitempty"`
- OrgName string `json:"org_name,omitempty"`
- EnterpriseID string `json:"enterprise_id,omitempty"`
- EnterpriseName string `json:"enterprise_name,omitempty"`
- RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
+ ID string `json:"id,omitempty"`
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []Tag `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ Instances []Instance `json:"instances,omitempty"`
+
+ RepoID string `json:"repo_id,omitempty"`
+ RepoName string `json:"repo_name,omitempty"`
+
+ OrgID string `json:"org_id,omitempty"`
+ OrgName string `json:"org_name,omitempty"`
+
+ EnterpriseID string `json:"enterprise_id,omitempty"`
+ EnterpriseName string `json:"enterprise_name,omitempty"`
+
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
// ExtraSpecs is an opaque raw json that gets sent to the provider
// as part of the bootstrap params for instances. It can contain
// any kind of data needed by providers. The contents of this field means
@@ -260,7 +394,64 @@ type Pool struct {
ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the github runner group in which the runners will be added.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+ // Priority is the priority of the pool. The higher the number, the higher the priority.
+ // When fetching matching pools for a set of tags, the result will be sorted in descending
+ // order of priority.
+ Priority uint `json:"priority,omitempty"`
+}
+
+// BelongsTo reports whether the pool is owned by the given forge entity,
+// comparing the ID field that matches the pool's type. It returns false
+// when the pool has no entity association (PoolType is empty).
+func (p Pool) BelongsTo(entity ForgeEntity) bool {
+	switch p.PoolType() {
+	case ForgeEntityTypeRepository:
+		return p.RepoID == entity.ID
+	case ForgeEntityTypeOrganization:
+		return p.OrgID == entity.ID
+	case ForgeEntityTypeEnterprise:
+		return p.EnterpriseID == entity.ID
+	}
+	return false
+}
+
+// GetCreatedAt returns the pool creation timestamp (implements
+// CreationDateGetter).
+func (p Pool) GetCreatedAt() time.Time {
+	return p.CreatedAt
+}
+
+// MinIdleRunnersAsInt returns MinIdleRunners as an int, clamping to
+// math.MaxInt to avoid overflow on platforms where uint can exceed int.
+func (p Pool) MinIdleRunnersAsInt() int {
+	if p.MinIdleRunners > math.MaxInt {
+		return math.MaxInt
+	}
+
+	return int(p.MinIdleRunners)
+}
+
+// MaxRunnersAsInt returns MaxRunners as an int, clamping to math.MaxInt to
+// avoid overflow on conversion.
+func (p Pool) MaxRunnersAsInt() int {
+	if p.MaxRunners > math.MaxInt {
+		return math.MaxInt
+	}
+	return int(p.MaxRunners)
+}
+
+func (p Pool) GetEntity() (ForgeEntity, error) {
+ switch p.PoolType() {
+ case ForgeEntityTypeRepository:
+ return ForgeEntity{
+ ID: p.RepoID,
+ EntityType: ForgeEntityTypeRepository,
+ }, nil
+ case ForgeEntityTypeOrganization:
+ return ForgeEntity{
+ ID: p.OrgID,
+ EntityType: ForgeEntityTypeOrganization,
+ }, nil
+ case ForgeEntityTypeEnterprise:
+ return ForgeEntity{
+ ID: p.EnterpriseID,
+ EntityType: ForgeEntityTypeEnterprise,
+ }, nil
+ }
+ return ForgeEntity{}, fmt.Errorf("pool has no associated entity")
}
func (p Pool) GetID() string {
@@ -274,39 +465,201 @@ func (p *Pool) RunnerTimeout() uint {
return p.RunnerBootstrapTimeout
}
-func (p *Pool) PoolType() PoolType {
- if p.RepoID != "" {
- return RepositoryPool
- } else if p.OrgID != "" {
- return OrganizationPool
- } else if p.EnterpriseID != "" {
- return EnterprisePool
+func (p *Pool) PoolType() ForgeEntityType {
+ switch {
+ case p.RepoID != "":
+ return ForgeEntityTypeRepository
+ case p.OrgID != "":
+ return ForgeEntityTypeOrganization
+ case p.EnterpriseID != "":
+ return ForgeEntityTypeEnterprise
}
return ""
}
-type Internal struct {
- OAuth2Token string `json:"oauth2"`
- ControllerID string `json:"controller_id"`
- InstanceCallbackURL string `json:"instance_callback_url"`
- InstanceMetadataURL string `json:"instance_metadata_url"`
- JWTSecret string `json:"jwt_secret"`
- // GithubCredentialsDetails contains all info about the credentials, except the
- // token, which is added above.
- GithubCredentialsDetails GithubCredentials `json:"gh_creds_details"`
+// HasRequiredLabels reports whether every label in set is present among the
+// pool's tags (set containment, matched by tag name). An empty set always
+// matches.
+func (p *Pool) HasRequiredLabels(set []string) bool {
+	asMap := make(map[string]struct{}, len(p.Tags))
+	for _, t := range p.Tags {
+		asMap[t.Name] = struct{}{}
+	}
+
+	for _, l := range set {
+		if _, ok := asMap[l]; !ok {
+			return false
+		}
+	}
+	return true
+}
+// used by swagger client generated code
+// swagger:model Pools
+type Pools []Pool
+
+// swagger:model ScaleSet
+// ScaleSet is GARM's model of a runner scale set: provider/image settings,
+// the desired runner counts, lifecycle state, and the owning entity
+// (exactly one of RepoID/OrgID/EnterpriseID is set — see ScaleSetType).
+type ScaleSet struct {
+	RunnerPrefix
+
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+	// ID is GARM's database ID; ScaleSetID is the ID assigned by the
+	// remote service.
+	ID            uint   `json:"id,omitempty"`
+	ScaleSetID    int    `json:"scale_set_id,omitempty"`
+	Name          string `json:"name,omitempty"`
+	DisableUpdate bool   `json:"disable_update"`
+
+	State         ScaleSetState `json:"state"`
+	ExtendedState string        `json:"extended_state,omitempty"`
+
+	ProviderName       string                `json:"provider_name,omitempty"`
+	MaxRunners         uint                  `json:"max_runners,omitempty"`
+	MinIdleRunners     uint                  `json:"min_idle_runners,omitempty"`
+	Image              string                `json:"image,omitempty"`
+	Flavor             string                `json:"flavor,omitempty"`
+	OSType             commonParams.OSType   `json:"os_type,omitempty"`
+	OSArch             commonParams.OSArch   `json:"os_arch,omitempty"`
+	Enabled            bool                  `json:"enabled,omitempty"`
+	Instances          []Instance            `json:"instances,omitempty"`
+	DesiredRunnerCount int                   `json:"desired_runner_count,omitempty"`
+
+	Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+
+	RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+	// ExtraSpecs is an opaque raw json that gets sent to the provider
+	// as part of the bootstrap params for instances. It can contain
+	// any kind of data needed by providers. The contents of this field means
+	// nothing to garm itself. We don't act on the information in this field at
+	// all. We only validate that it's a proper json.
+	ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+	// GithubRunnerGroup is the github runner group in which the runners will be added.
+	// The runner group must be created by someone with access to the enterprise.
+	GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+
+	StatusMessages []StatusMessage `json:"status_messages"`
+
+	RepoID   string `json:"repo_id,omitempty"`
+	RepoName string `json:"repo_name,omitempty"`
+
+	OrgID   string `json:"org_id,omitempty"`
+	OrgName string `json:"org_name,omitempty"`
+
+	EnterpriseID   string `json:"enterprise_id,omitempty"`
+	EnterpriseName string `json:"enterprise_name,omitempty"`
+
+	// LastMessageID tracks the most recent queue message consumed; internal
+	// bookkeeping, never serialized.
+	LastMessageID int64 `json:"-"`
+}
+
+// BelongsTo reports whether the scale set is owned by the given forge
+// entity, comparing the ID field that matches the scale set's type. It
+// returns false when there is no entity association.
+func (p ScaleSet) BelongsTo(entity ForgeEntity) bool {
+	switch p.ScaleSetType() {
+	case ForgeEntityTypeRepository:
+		return p.RepoID == entity.ID
+	case ForgeEntityTypeOrganization:
+		return p.OrgID == entity.ID
+	case ForgeEntityTypeEnterprise:
+		return p.EnterpriseID == entity.ID
+	}
+	return false
+}
+
+// GetID returns GARM's database ID for the scale set (implements IDGetter).
+func (p ScaleSet) GetID() uint {
+	return p.ID
+}
+
+// GetEntity returns the forge entity (repo, org or enterprise) this scale
+// set belongs to. Only ID and EntityType are populated. It errors when the
+// scale set has no entity association.
+func (p ScaleSet) GetEntity() (ForgeEntity, error) {
+	switch p.ScaleSetType() {
+	case ForgeEntityTypeRepository:
+		return ForgeEntity{
+			ID:         p.RepoID,
+			EntityType: ForgeEntityTypeRepository,
+		}, nil
+	case ForgeEntityTypeOrganization:
+		return ForgeEntity{
+			ID:         p.OrgID,
+			EntityType: ForgeEntityTypeOrganization,
+		}, nil
+	case ForgeEntityTypeEnterprise:
+		return ForgeEntity{
+			ID:         p.EnterpriseID,
+			EntityType: ForgeEntityTypeEnterprise,
+		}, nil
+	}
+	return ForgeEntity{}, fmt.Errorf("scale set has no associated entity")
+}
+
+// ScaleSetType infers the owning entity type from whichever entity ID is
+// set, checked in repo -> org -> enterprise order; empty when none is set.
+func (p *ScaleSet) ScaleSetType() ForgeEntityType {
+	switch {
+	case p.RepoID != "":
+		return ForgeEntityTypeRepository
+	case p.OrgID != "":
+		return ForgeEntityTypeOrganization
+	case p.EnterpriseID != "":
+		return ForgeEntityTypeEnterprise
+	}
+	return ""
+}
+
+// RunnerTimeout returns the configured bootstrap timeout, falling back to
+// the application default when unset (zero).
+func (p *ScaleSet) RunnerTimeout() uint {
+	if p.RunnerBootstrapTimeout == 0 {
+		return appdefaults.DefaultRunnerBootstrapTimeout
+	}
+	return p.RunnerBootstrapTimeout
+}
+
+// used by swagger client generated code
+// swagger:model ScaleSets
+type ScaleSets []ScaleSet
+
+// swagger:model Repository
type Repository struct {
- ID string `json:"id"`
- Owner string `json:"owner"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+	// CredentialsName is the name of the credentials associated with the repository.
+	// This field is now deprecated. Use CredentialsID instead. This field will be
+	// removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+
+ CredentialsID uint `json:"credentials_id,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+// GetCredentialsName returns the credentials name for the repository,
+// preferring the deprecated CredentialsName field when it is still set,
+// and falling back to the name on the associated Credentials object.
+func (r Repository) GetCredentialsName() string {
+	if r.CredentialsName != "" {
+		return r.CredentialsName
+	}
+	return r.Credentials.Name
+}
+
+// CreationDateGetter returns the repository creation timestamp.
+// NOTE(review): Organization and Enterprise expose the same value as
+// GetCreatedAt; this divergent name looks accidental, but renaming would
+// break any interface satisfied by this method — confirm against callers.
+func (r Repository) CreationDateGetter() time.Time {
+	return r.CreatedAt
+}
+
+// GetEntity converts the repository into its ForgeEntity representation,
+// copying identity, balancer type, credentials and the webhook secret.
+// It returns an error when the repository has no ID.
+func (r Repository) GetEntity() (ForgeEntity, error) {
+	if r.ID == "" {
+		return ForgeEntity{}, fmt.Errorf("repository has no ID")
+	}
+	return ForgeEntity{
+		ID:               r.ID,
+		EntityType:       ForgeEntityTypeRepository,
+		Owner:            r.Owner,
+		Name:             r.Name,
+		PoolBalancerType: r.PoolBalancerType,
+		Credentials:      r.Credentials,
+		WebhookSecret:    r.WebhookSecret,
+		CreatedAt:        r.CreatedAt,
+		UpdatedAt:        r.UpdatedAt,
+	}, nil
+}
+
func (r Repository) GetName() string {
return r.Name
}
@@ -315,16 +668,62 @@ func (r Repository) GetID() string {
return r.ID
}
+// GetBalancerType returns the configured pool balancer type, defaulting to
+// round robin when unset.
+func (r Repository) GetBalancerType() PoolBalancerType {
+	if r.PoolBalancerType == "" {
+		return PoolBalancerTypeRoundRobin
+	}
+	return r.PoolBalancerType
+}
+
+// String implements fmt.Stringer, rendering the repository as "owner/name".
+func (r Repository) String() string {
+	return fmt.Sprintf("%s/%s", r.Owner, r.Name)
+}
+
+// used by swagger client generated code
+// swagger:model Repositories
+type Repositories []Repository
+
+// swagger:model Organization
type Organization struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+	// CredentialsName is the name of the credentials associated with the organization.
+	// This field is now deprecated. Use CredentialsID instead. This field will be
+	// removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ CredentialsID uint `json:"credentials_id,omitempty"`
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+// GetCreatedAt returns the organization creation timestamp.
+func (o Organization) GetCreatedAt() time.Time {
+	return o.CreatedAt
+}
+
+// GetEntity converts the organization into its ForgeEntity representation.
+// The organization name is stored in the entity's Owner field (Name is left
+// empty for non-repository entities). Returns an error when the org has no ID.
+func (o Organization) GetEntity() (ForgeEntity, error) {
+	if o.ID == "" {
+		return ForgeEntity{}, fmt.Errorf("organization has no ID")
+	}
+	return ForgeEntity{
+		ID:               o.ID,
+		EntityType:       ForgeEntityTypeOrganization,
+		Owner:            o.Name,
+		WebhookSecret:    o.WebhookSecret,
+		PoolBalancerType: o.PoolBalancerType,
+		Credentials:      o.Credentials,
+		CreatedAt:        o.CreatedAt,
+		UpdatedAt:        o.UpdatedAt,
+	}, nil
+}
+
func (o Organization) GetName() string {
return o.Name
}
@@ -333,16 +732,58 @@ func (o Organization) GetID() string {
return o.ID
}
+// GetBalancerType returns the configured pool balancer type, defaulting to
+// round robin when unset.
+func (o Organization) GetBalancerType() PoolBalancerType {
+	if o.PoolBalancerType == "" {
+		return PoolBalancerTypeRoundRobin
+	}
+	return o.PoolBalancerType
+}
+
+// used by swagger client generated code
+// swagger:model Organizations
+type Organizations []Organization
+
+// swagger:model Enterprise
type Enterprise struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Pools []Pool `json:"pool,omitempty"`
- CredentialsName string `json:"credentials_name"`
+ ID string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Pools []Pool `json:"pool,omitempty"`
+	// CredentialsName is the name of the credentials associated with the enterprise.
+	// This field is now deprecated. Use CredentialsID instead. This field will be
+	// removed in v0.2.0.
+ CredentialsName string `json:"credentials_name,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ CredentialsID uint `json:"credentials_id,omitempty"`
PoolManagerStatus PoolManagerStatus `json:"pool_manager_status,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Events []EntityEvent `json:"events,omitempty"`
// Do not serialize sensitive info.
WebhookSecret string `json:"-"`
}
+// GetCreatedAt returns the enterprise creation timestamp.
+func (e Enterprise) GetCreatedAt() time.Time {
+	return e.CreatedAt
+}
+
+// GetEntity converts the enterprise into its ForgeEntity representation.
+// The enterprise name is stored in the entity's Owner field, mirroring the
+// Organization conversion. Returns an error when the enterprise has no ID.
+func (e Enterprise) GetEntity() (ForgeEntity, error) {
+	if e.ID == "" {
+		return ForgeEntity{}, fmt.Errorf("enterprise has no ID")
+	}
+	return ForgeEntity{
+		ID:               e.ID,
+		EntityType:       ForgeEntityTypeEnterprise,
+		Owner:            e.Name,
+		WebhookSecret:    e.WebhookSecret,
+		PoolBalancerType: e.PoolBalancerType,
+		Credentials:      e.Credentials,
+		CreatedAt:        e.CreatedAt,
+		UpdatedAt:        e.UpdatedAt,
+	}, nil
+}
+
func (e Enterprise) GetName() string {
return e.Name
}
@@ -351,61 +792,265 @@ func (e Enterprise) GetID() string {
return e.ID
}
+// GetBalancerType returns the configured pool balancer type, defaulting to
+// round robin when unset.
+func (e Enterprise) GetBalancerType() PoolBalancerType {
+	if e.PoolBalancerType == "" {
+		return PoolBalancerTypeRoundRobin
+	}
+	return e.PoolBalancerType
+}
+
+// used by swagger client generated code
+// swagger:model Enterprises
+type Enterprises []Enterprise
+
// Users holds information about a particular user
+// swagger:model User
type User struct {
- ID string `json:"id"`
- CreatedAt time.Time `json:"created_at"`
- UpdatedAt time.Time `json:"updated_at"`
- Email string `json:"email"`
- Username string `json:"username"`
- FullName string `json:"full_name"`
- Password string `json:"-"`
- Enabled bool `json:"enabled"`
- IsAdmin bool `json:"is_admin"`
+ ID string `json:"id,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ Email string `json:"email,omitempty"`
+ Username string `json:"username,omitempty"`
+ FullName string `json:"full_name,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ IsAdmin bool `json:"is_admin,omitempty"`
+ // Do not serialize sensitive info.
+ Password string `json:"-"`
+ Generation uint `json:"-"`
}
// JWTResponse holds the JWT token returned as a result of a
// successful auth
+// swagger:model JWTResponse
type JWTResponse struct {
- Token string `json:"token"`
+ Token string `json:"token,omitempty"`
}
+// swagger:model ControllerInfo
type ControllerInfo struct {
- ControllerID uuid.UUID `json:"controller_id"`
- Hostname string `json:"hostname"`
+ // ControllerID is the unique ID of this controller. This ID gets generated
+ // automatically on controller init.
+ ControllerID uuid.UUID `json:"controller_id,omitempty"`
+ // Hostname is the hostname of the machine that runs this controller. In the
+ // future, this field will be migrated to a separate table that will keep track
+ // of each the controller nodes that are part of a cluster. This will happen when
+ // we implement controller scale-out capability.
+ Hostname string `json:"hostname,omitempty"`
+ // MetadataURL is the public metadata URL of the GARM instance. This URL is used
+ // by instances to fetch information they need to set themselves up. The URL itself
+ // may be made available to runners via a reverse proxy or a load balancer. That
+ // means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ MetadataURL string `json:"metadata_url,omitempty"`
+ // CallbackURL is the URL where instances can send updates back to the controller.
+ // This URL is used by instances to send status updates back to the controller. The
+ // URL itself may be made available to instances via a reverse proxy or a load balancer.
+ // That means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ CallbackURL string `json:"callback_url,omitempty"`
+ // WebhookURL is the base URL where the controller will receive webhooks from github.
+ // When webhook management is used, this URL is used as a base to which the controller
+ // UUID is appended and which will receive the webhooks.
+ // The URL itself may be made available to instances via a reverse proxy or a load balancer.
+ // That means that the user is responsible for telling GARM what the public URL is, by
+ // setting this field.
+ WebhookURL string `json:"webhook_url,omitempty"`
+ // ControllerWebhookURL is the controller specific URL where webhooks will be received.
+ // This field holds the WebhookURL defined above to which we append the ControllerID.
+ // Functionally it is the same as WebhookURL, but it allows us to safely manage webhooks
+ // from GARM without accidentally removing webhooks from other services or GARM controllers.
+ ControllerWebhookURL string `json:"controller_webhook_url,omitempty"`
+ // MinimumJobAgeBackoff is the minimum time in seconds that a job must be in queued state
+ // before GARM will attempt to allocate a runner for it. When set to a non zero value,
+ // GARM will ignore the job until the job's age is greater than this value. When using
+ // the min_idle_runners feature of a pool, this gives enough time for potential idle
+ // runners to pick up the job before GARM attempts to allocate a new runner, thus avoiding
+ // the need to potentially scale down runners later.
+ MinimumJobAgeBackoff uint `json:"minimum_job_age_backoff,omitempty"`
+ // Version is the version of the GARM controller.
+ Version string `json:"version,omitempty"`
}
-type GithubCredentials struct {
- Name string `json:"name,omitempty"`
- Description string `json:"description,omitempty"`
- BaseURL string `json:"base_url"`
- APIBaseURL string `json:"api_base_url"`
- UploadBaseURL string `json:"upload_base_url"`
- CABundle []byte `json:"ca_bundle,omitempty"`
+// JobBackoff returns MinimumJobAgeBackoff converted to a time.Duration,
+// clamped to math.MaxInt64 so the uint-to-int64 conversion cannot overflow
+// on platforms where uint is 64 bits wide.
+//
+// NOTE(review): MinimumJobAgeBackoff is documented as seconds, while the
+// value returned here is in time.Duration base units (nanoseconds); confirm
+// against call sites whether they scale the result.
+func (c *ControllerInfo) JobBackoff() time.Duration {
+	// The previous guard was inverted (`math.MaxInt64 > backoff`), which is
+	// true for virtually every value and made the function always return
+	// MaxInt64. Clamp only when the value actually exceeds int64 range.
+	if uint64(c.MinimumJobAgeBackoff) > uint64(math.MaxInt64) {
+		return time.Duration(math.MaxInt64)
+	}
+	return time.Duration(int64(c.MinimumJobAgeBackoff))
+}
+// swagger:model GithubRateLimit
+type GithubRateLimit struct {
+ Limit int `json:"limit,omitempty"`
+ Used int `json:"used,omitempty"`
+ Remaining int `json:"remaining,omitempty"`
+ Reset int64 `json:"reset,omitempty"`
+}
+
+// ResetIn returns the time remaining until the rate limit window resets.
+// When Reset is unset (0), ResetAt returns the zero time, so the result is
+// a large negative duration; callers should check Reset before relying on it.
+func (g GithubRateLimit) ResetIn() time.Duration {
+	return time.Until(g.ResetAt())
+}
+
+// ResetAt returns the instant at which the rate limit window resets,
+// interpreting Reset as a Unix timestamp in seconds. The zero time is
+// returned when Reset is unset.
+func (g GithubRateLimit) ResetAt() time.Time {
+	if g.Reset == 0 {
+		return time.Time{}
+	}
+	return time.Unix(g.Reset, 0)
+}
+
+// swagger:model ForgeCredentials
+type ForgeCredentials struct {
+ ID uint `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CABundle []byte `json:"ca_bundle,omitempty"`
+ AuthType ForgeAuthType `json:"auth-type,omitempty"`
+
+ ForgeType EndpointType `json:"forge_type,omitempty"`
+
+ Repositories []Repository `json:"repositories,omitempty"`
+ Organizations []Organization `json:"organizations,omitempty"`
+ Enterprises []Enterprise `json:"enterprises,omitempty"`
+ Endpoint ForgeEndpoint `json:"endpoint,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ RateLimit *GithubRateLimit `json:"rate_limit,omitempty"`
+
+ // Do not serialize sensitive info.
+ CredentialsPayload []byte `json:"-"`
+}
+
+// GetID returns the database ID of the credentials.
+func (g ForgeCredentials) GetID() uint {
+	return g.ID
+}
+
+// GetHTTPClient builds an *http.Client authenticated with this credential
+// set. When a CA bundle is present it is installed as the TLS root pool.
+// For ForgeAuthTypeApp the CredentialsPayload must decode into a GithubApp
+// (app ID, installation ID and private key); any other auth type is treated
+// as a PAT and the payload must decode into a GithubPAT with a non-empty
+// OAuth2 token. The supplied context only carries the base HTTP client for
+// the oauth2 package; it does not bound the lifetime of the returned client.
+func (g ForgeCredentials) GetHTTPClient(ctx context.Context) (*http.Client, error) {
+	var roots *x509.CertPool
+	if g.CABundle != nil {
+		roots = x509.NewCertPool()
+		ok := roots.AppendCertsFromPEM(g.CABundle)
+		if !ok {
+			return nil, fmt.Errorf("failed to parse CA cert")
+		}
+	}
+
+	dialer := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+	}
+
+	httpTransport := &http.Transport{
+		Proxy:       http.ProxyFromEnvironment,
+		DialContext: dialer.DialContext,
+		TLSClientConfig: &tls.Config{
+			RootCAs:    roots,
+			MinVersion: tls.VersionTLS12,
+		},
+		ForceAttemptHTTP2:     true,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+
+	var tc *http.Client
+	switch g.AuthType {
+	case ForgeAuthTypeApp:
+		var app GithubApp
+		if err := json.Unmarshal(g.CredentialsPayload, &app); err != nil {
+			return nil, fmt.Errorf("failed to unmarshal github app credentials: %w", err)
+		}
+		if app.AppID == 0 || app.InstallationID == 0 || len(app.PrivateKeyBytes) == 0 {
+			return nil, fmt.Errorf("github app credentials are missing required fields")
+		}
+		itr, err := ghinstallation.New(httpTransport, app.AppID, app.InstallationID, app.PrivateKeyBytes)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create github app installation transport: %w", err)
+		}
+		// Honor GHES deployments by pointing the installation transport at
+		// the configured API base URL.
+		itr.BaseURL = g.APIBaseURL
+
+		tc = &http.Client{Transport: itr}
+	default:
+		var pat GithubPAT
+		if err := json.Unmarshal(g.CredentialsPayload, &pat); err != nil {
+			// Error message fixed: this branch decodes PAT credentials, not
+			// app credentials (previous text was a copy/paste of the branch
+			// above).
+			return nil, fmt.Errorf("failed to unmarshal github PAT credentials: %w", err)
+		}
+		// Make oauth2.NewClient wrap our transport (with the custom CA pool)
+		// instead of http.DefaultTransport.
+		httpClient := &http.Client{Transport: httpTransport}
+		ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+
+		if pat.OAuth2Token == "" {
+			return nil, fmt.Errorf("github credentials are missing the OAuth2 token")
+		}
+
+		ts := oauth2.StaticTokenSource(
+			&oauth2.Token{AccessToken: pat.OAuth2Token},
+		)
+		tc = oauth2.NewClient(ctx, ts)
+	}
+
+	return tc, nil
+}
+
+// RootCertificateBundle splits the configured CA bundle into individual
+// PEM-encoded certificates, keyed by certificate serial number. An empty
+// bundle yields an empty (non-error) result; a bundle containing an
+// unparsable certificate yields an error.
+func (g ForgeCredentials) RootCertificateBundle() (CertificateBundle, error) {
+	if len(g.CABundle) == 0 {
+		return CertificateBundle{}, nil
+	}
+
+	ret := map[string][]byte{}
+
+	var block *pem.Block
+	rest := g.CABundle
+	for {
+		// pem.Decode consumes one block per iteration; nil means no more
+		// PEM data (trailing non-PEM bytes are silently ignored).
+		block, rest = pem.Decode(rest)
+		if block == nil {
+			break
+		}
+		pub, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return CertificateBundle{}, err
+		}
+		out := &bytes.Buffer{}
+		if err := pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: block.Bytes}); err != nil {
+			return CertificateBundle{}, err
+		}
+		ret[fmt.Sprintf("%d", pub.SerialNumber)] = out.Bytes()
+	}
+
+	return CertificateBundle{
+		RootCertificates: ret,
+	}, nil
+}
+
+// used by swagger client generated code
+// swagger:model Credentials
+type Credentials []ForgeCredentials
+
+// swagger:model Provider
type Provider struct {
- Name string `json:"name"`
- ProviderType ProviderType `json:"type"`
- Description string `json:"description"`
+ Name string `json:"name,omitempty"`
+ ProviderType ProviderType `json:"type,omitempty"`
+ Description string `json:"description,omitempty"`
}
-type UpdatePoolStateParams struct {
- WebhookSecret string
-}
+// used by swagger client generated code
+// swagger:model Providers
+type Providers []Provider
+// swagger:model PoolManagerStatus
type PoolManagerStatus struct {
- IsRunning bool `json:"running"`
+ IsRunning bool `json:"running,omitempty"`
FailureReason string `json:"failure_reason,omitempty"`
}
type RunnerInfo struct {
- Name string
- Labels []string
+ Name string `json:"name,omitempty"`
+ Labels []string `json:"labels,omitempty"`
}
type RunnerPrefix struct {
- Prefix string `json:"runner_prefix"`
+ Prefix string `json:"runner_prefix,omitempty"`
}
func (p RunnerPrefix) GetRunnerPrefix() string {
@@ -414,3 +1059,198 @@ func (p RunnerPrefix) GetRunnerPrefix() string {
}
return p.Prefix
}
+
+// swagger:model Job
+type Job struct {
+ // ID is the ID of the job.
+ ID int64 `json:"id,omitempty"`
+
+ WorkflowJobID int64 `json:"workflow_job_id,omitempty"`
+ // ScaleSetJobID is the job ID when generated for a scale set.
+ ScaleSetJobID string `json:"scaleset_job_id,omitempty"`
+ // RunID is the ID of the workflow run. A run may have multiple jobs.
+ RunID int64 `json:"run_id,omitempty"`
+ // Action is the specific activity that triggered the event.
+ Action string `json:"action,omitempty"`
+ // Conclusion is the outcome of the job.
+ // Possible values: "success", "failure", "neutral", "cancelled", "skipped",
+ // "timed_out", "action_required"
+ Conclusion string `json:"conclusion,omitempty"`
+ // Status is the phase of the lifecycle that the job is currently in.
+ // "queued", "in_progress" and "completed".
+ Status string `json:"status,omitempty"`
+	// Name is the name of the job that was triggered.
+ Name string `json:"name,omitempty"`
+
+ StartedAt time.Time `json:"started_at,omitempty"`
+ CompletedAt time.Time `json:"completed_at,omitempty"`
+
+ GithubRunnerID int64 `json:"runner_id,omitempty"`
+ RunnerName string `json:"runner_name,omitempty"`
+ RunnerGroupID int64 `json:"runner_group_id,omitempty"`
+ RunnerGroupName string `json:"runner_group_name,omitempty"`
+
+ // repository in which the job was triggered.
+ RepositoryName string `json:"repository_name,omitempty"`
+ RepositoryOwner string `json:"repository_owner,omitempty"`
+
+ Labels []string `json:"labels,omitempty"`
+
+ // The entity that received the hook.
+ //
+ // Webhooks may be configured on the repo, the org and/or the enterprise.
+ // If we only configure a repo to use garm, we'll only ever receive a
+ // webhook from the repo. But if we configure the parent org of the repo and
+ // the parent enterprise of the org to use garm, a webhook will be sent for each
+ // entity type, in response to one workflow event. Thus, we will get 3 webhooks
+ // with the same run_id and job id. Record all involved entities in the same job
+ // if we have them configured in garm.
+ RepoID *uuid.UUID `json:"repo_id,omitempty"`
+ OrgID *uuid.UUID `json:"org_id,omitempty"`
+ EnterpriseID *uuid.UUID `json:"enterprise_id,omitempty"`
+
+ LockedBy uuid.UUID `json:"locked_by,omitempty"`
+
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+}
+
+// swagger:model Jobs
+// used by swagger client generated code
+type Jobs []Job
+
+// swagger:model InstallWebhookParams
+type InstallWebhookParams struct {
+ WebhookEndpointType WebhookEndpointType `json:"webhook_endpoint_type,omitempty"`
+ InsecureSSL bool `json:"insecure_ssl,omitempty"`
+}
+
+// swagger:model HookInfo
+type HookInfo struct {
+ ID int64 `json:"id,omitempty"`
+ URL string `json:"url,omitempty"`
+ Events []string `json:"events,omitempty"`
+ Active bool `json:"active,omitempty"`
+ InsecureSSL bool `json:"insecure_ssl,omitempty"`
+}
+
+type CertificateBundle struct {
+ RootCertificates map[string][]byte `json:"root_certificates,omitempty"`
+}
+
+// swagger:model UpdateSystemInfoParams
+type UpdateSystemInfoParams struct {
+ OSName string `json:"os_name,omitempty"`
+ OSVersion string `json:"os_version,omitempty"`
+ AgentID *int64 `json:"agent_id,omitempty"`
+}
+
+type ForgeEntity struct {
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+ EntityType ForgeEntityType `json:"entity_type,omitempty"`
+ Credentials ForgeCredentials `json:"credentials,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancing_type,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+ WebhookSecret string `json:"-"`
+}
+
+// GetCreatedAt returns the entity creation timestamp.
+func (g ForgeEntity) GetCreatedAt() time.Time {
+	return g.CreatedAt
+}
+
+// GetForgeType returns the forge type recorded on the entity's credentials,
+// or an error when the credentials carry no forge type.
+func (g ForgeEntity) GetForgeType() (EndpointType, error) {
+	if g.Credentials.ForgeType == "" {
+		return "", fmt.Errorf("credentials forge type is empty")
+	}
+	return g.Credentials.ForgeType, nil
+}
+
+// ForgeURL builds the web URL of the entity from the credentials' BaseURL:
+// "<base>/<owner>/<name>" for repositories, "<base>/<owner>" for
+// organizations and "<base>/enterprises/<owner>" for enterprises. An empty
+// string is returned for an unknown entity type.
+func (g ForgeEntity) ForgeURL() string {
+	switch g.EntityType {
+	case ForgeEntityTypeRepository:
+		return fmt.Sprintf("%s/%s/%s", g.Credentials.BaseURL, g.Owner, g.Name)
+	case ForgeEntityTypeOrganization:
+		return fmt.Sprintf("%s/%s", g.Credentials.BaseURL, g.Owner)
+	case ForgeEntityTypeEnterprise:
+		return fmt.Sprintf("%s/enterprises/%s", g.Credentials.BaseURL, g.Owner)
+	}
+	return ""
+}
+
+// GetPoolBalancerType returns the entity's pool balancer type, defaulting
+// to round robin when unset.
+func (g ForgeEntity) GetPoolBalancerType() PoolBalancerType {
+	if g.PoolBalancerType == "" {
+		return PoolBalancerTypeRoundRobin
+	}
+	return g.PoolBalancerType
+}
+
+// LabelScope maps the entity type to its metrics label scope constant.
+// An empty string is returned for an unknown entity type.
+func (g ForgeEntity) LabelScope() string {
+	switch g.EntityType {
+	case ForgeEntityTypeRepository:
+		return MetricsLabelRepositoryScope
+	case ForgeEntityTypeOrganization:
+		return MetricsLabelOrganizationScope
+	case ForgeEntityTypeEnterprise:
+		return MetricsLabelEnterpriseScope
+	}
+	return ""
+}
+
+// String implements fmt.Stringer: "owner/name" for repositories, the owner
+// alone for organizations and enterprises, and an empty string for an
+// unknown entity type.
+func (g ForgeEntity) String() string {
+	switch g.EntityType {
+	case ForgeEntityTypeRepository:
+		return fmt.Sprintf("%s/%s", g.Owner, g.Name)
+	case ForgeEntityTypeOrganization, ForgeEntityTypeEnterprise:
+		return g.Owner
+	}
+	return ""
+}
+
+// GetIDAsUUID parses the entity ID as a UUID. An empty ID yields uuid.Nil
+// with a nil error (callers must treat uuid.Nil as "not set"); a non-empty
+// ID that fails to parse yields an error.
+func (g ForgeEntity) GetIDAsUUID() (uuid.UUID, error) {
+	if g.ID == "" {
+		return uuid.Nil, nil
+	}
+	id, err := uuid.Parse(g.ID)
+	if err != nil {
+		return uuid.Nil, fmt.Errorf("failed to parse entity ID: %w", err)
+	}
+	return id, nil
+}
+
+// used by swagger client generated code
+// swagger:model ForgeEndpoints
+type ForgeEndpoints []ForgeEndpoint
+
+// swagger:model ForgeEndpoint
+type ForgeEndpoint struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+
+ EndpointType EndpointType `json:"endpoint_type,omitempty"`
+}
+
+type RepositoryFilter struct {
+ Owner string
+ Name string
+ Endpoint string
+}
+
+type OrganizationFilter struct {
+ Name string
+ Endpoint string
+}
+
+type EnterpriseFilter struct {
+ Name string
+ Endpoint string
+}
diff --git a/params/requests.go b/params/requests.go
index c7c50eb5..c9021434 100644
--- a/params/requests.go
+++ b/params/requests.go
@@ -15,147 +15,203 @@
package params
import (
+ "crypto/x509"
"encoding/json"
+ "encoding/pem"
"fmt"
+ "net/url"
- "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/runner/providers/common"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
)
-const DefaultRunnerPrefix = "garm"
+const (
+ DefaultRunnerPrefix string = "garm"
+ httpsScheme string = "https"
+ httpScheme string = "http"
+)
type InstanceRequest struct {
- Name string `json:"name"`
- OSType OSType `json:"os_type"`
- OSVersion string `json:"os_version"`
+ Name string `json:"name"`
+ OSType commonParams.OSType `json:"os_type"`
+ OSVersion string `json:"os_version"`
}
+// swagger:model CreateRepoParams
type CreateRepoParams struct {
- Owner string `json:"owner"`
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Owner string `json:"owner,omitempty"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
+ ForgeType EndpointType `json:"forge_type,omitempty"`
}
+// Validate checks that the parameters required to create a repository are
+// present and that the forge type and pool balancer type, when supplied,
+// are among the accepted values.
 func (c *CreateRepoParams) Validate() error {
 	if c.Owner == "" {
-		return errors.NewBadRequestError("missing owner")
+		return runnerErrors.NewBadRequestError("missing owner")
 	}
 	if c.Name == "" {
-		return errors.NewBadRequestError("missing repo name")
+		return runnerErrors.NewBadRequestError("missing repo name")
 	}
 	if c.CredentialsName == "" {
-		return errors.NewBadRequestError("missing credentials name")
+		return runnerErrors.NewBadRequestError("missing credentials name")
 	}
 	if c.WebhookSecret == "" {
-		return errors.NewMissingSecretError("missing secret")
+		return runnerErrors.NewMissingSecretError("missing secret")
 	}
+
+	switch c.ForgeType {
+	case GithubEndpointType, GiteaEndpointType, AutoEndpointType:
+		break
+	default:
+		return runnerErrors.NewBadRequestError("invalid forge type")
+	}
+
+	// NOTE(review): an unset PoolBalancerType falls through to the default
+	// branch unless PoolBalancerTypeNone is the empty string — confirm the
+	// constant's value allows omitting the field.
+	switch c.PoolBalancerType {
+	case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+	default:
+		return runnerErrors.NewBadRequestError("invalid pool balancer type")
+	}
+
 	return nil
 }
+// swagger:model CreateOrgParams
type CreateOrgParams struct {
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
+ ForgeType EndpointType `json:"forge_type,omitempty"`
}
+// Validate checks that the parameters required to create an organization
+// are present and that the forge type and pool balancer type, when
+// supplied, are among the accepted values.
 func (c *CreateOrgParams) Validate() error {
 	if c.Name == "" {
-		return errors.NewBadRequestError("missing org name")
+		return runnerErrors.NewBadRequestError("missing org name")
 	}
 	if c.CredentialsName == "" {
-		return errors.NewBadRequestError("missing credentials name")
+		return runnerErrors.NewBadRequestError("missing credentials name")
 	}
 	if c.WebhookSecret == "" {
-		return errors.NewMissingSecretError("missing secret")
+		return runnerErrors.NewMissingSecretError("missing secret")
+	}
+
+	switch c.ForgeType {
+	case GithubEndpointType, GiteaEndpointType, AutoEndpointType:
+		break
+	default:
+		return runnerErrors.NewBadRequestError("invalid forge type")
+	}
+
+	// NOTE(review): an unset PoolBalancerType falls through to the default
+	// branch unless PoolBalancerTypeNone is the empty string — confirm the
+	// constant's value allows omitting the field.
+	switch c.PoolBalancerType {
+	case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+	default:
+		return runnerErrors.NewBadRequestError("invalid pool balancer type")
 	}
 	return nil
 }
+// swagger:model CreateEnterpriseParams
type CreateEnterpriseParams struct {
- Name string `json:"name"`
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+ Name string `json:"name,omitempty"`
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
}
+// Validate checks that the parameters required to create an enterprise are
+// present and that the pool balancer type, when supplied, is among the
+// accepted values. Unlike the repo/org variants, no forge type is
+// validated here (CreateEnterpriseParams has no ForgeType field).
 func (c *CreateEnterpriseParams) Validate() error {
 	if c.Name == "" {
-		return errors.NewBadRequestError("missing enterprise name")
+		return runnerErrors.NewBadRequestError("missing enterprise name")
 	}
 	if c.CredentialsName == "" {
-		return errors.NewBadRequestError("missing credentials name")
+		return runnerErrors.NewBadRequestError("missing credentials name")
 	}
 	if c.WebhookSecret == "" {
-		return errors.NewMissingSecretError("missing secret")
+		return runnerErrors.NewMissingSecretError("missing secret")
+	}
+
+	// NOTE(review): an unset PoolBalancerType falls through to the default
+	// branch unless PoolBalancerTypeNone is the empty string — confirm the
+	// constant's value allows omitting the field.
+	switch c.PoolBalancerType {
+	case PoolBalancerTypeRoundRobin, PoolBalancerTypePack, PoolBalancerTypeNone:
+	default:
+		return runnerErrors.NewBadRequestError("invalid pool balancer type")
 	}
 	return nil
 }
// NewUserParams holds the needed information to create
// a new user
+// swagger:model NewUserParams
type NewUserParams struct {
- Email string `json:"email"`
- Username string `json:"username"`
- FullName string `json:"full_name"`
- Password string `json:"password"`
+ Email string `json:"email,omitempty"`
+ Username string `json:"username,omitempty"`
+ FullName string `json:"full_name,omitempty"`
+ Password string `json:"password,omitempty"`
IsAdmin bool `json:"-"`
Enabled bool `json:"-"`
}
+// swagger:model UpdatePoolParams
type UpdatePoolParams struct {
RunnerPrefix
- Tags []string `json:"tags,omitempty"`
- Enabled *bool `json:"enabled,omitempty"`
- MaxRunners *uint `json:"max_runners,omitempty"`
- MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
- RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType OSType `json:"os_type"`
- OSArch OSArch `json:"os_arch"`
- ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ MaxRunners *uint `json:"max_runners,omitempty"`
+ MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
+ RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the github runner group in which the runners of this
// pool will be added to.
// The runner group must be created by someone with access to the enterprise.
GitHubRunnerGroup *string `json:"github-runner-group,omitempty"`
+ Priority *uint `json:"priority,omitempty"`
}
type CreateInstanceParams struct {
- Name string
- OSType OSType
- OSArch OSArch
- Status common.InstanceStatus
- RunnerStatus common.RunnerStatus
- CallbackURL string
- MetadataURL string
+ Name string `json:"name,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Status commonParams.InstanceStatus `json:"status,omitempty"`
+ RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
+ CallbackURL string `json:"callback_url,omitempty"`
+ MetadataURL string `json:"metadata_url,omitempty"`
// GithubRunnerGroup is the github runner group to which the runner belongs.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string
- CreateAttempt int `json:"-"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+ CreateAttempt int `json:"-"`
+ AgentID int64 `json:"-"`
+ AditionalLabels []string `json:"aditional_labels,omitempty"`
+ JitConfiguration map[string]string `json:"jit_configuration,omitempty"`
}
+// swagger:model CreatePoolParams
type CreatePoolParams struct {
RunnerPrefix
- ProviderName string `json:"provider_name"`
- MaxRunners uint `json:"max_runners"`
- MinIdleRunners uint `json:"min_idle_runners"`
- Image string `json:"image"`
- Flavor string `json:"flavor"`
- OSType OSType `json:"os_type"`
- OSArch OSArch `json:"os_arch"`
- Tags []string `json:"tags"`
- Enabled bool `json:"enabled"`
- RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout"`
- ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
// GithubRunnerGroup is the github runner group in which the runners of this
// pool will be added to.
// The runner group must be created by someone with access to the enterprise.
- GitHubRunnerGroup string `json:"github-runner-group"`
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+ Priority uint `json:"priority,omitempty"`
}
func (p *CreatePoolParams) Validate() error {
@@ -194,45 +250,540 @@ type UpdateInstanceParams struct {
OSVersion string `json:"os_version,omitempty"`
// Addresses is a list of IP addresses the provider reports
// for this instance.
- Addresses []Address `json:"addresses,omitempty"`
+ Addresses []commonParams.Address `json:"addresses,omitempty"`
// Status is the status of the instance inside the provider (eg: running, stopped, etc)
- Status common.InstanceStatus `json:"status,omitempty"`
- RunnerStatus common.RunnerStatus `json:"runner_status,omitempty"`
- ProviderFault []byte `json:"provider_fault,omitempty"`
- AgentID int64 `json:"-"`
- CreateAttempt int `json:"-"`
- TokenFetched *bool `json:"-"`
+ Status commonParams.InstanceStatus `json:"status,omitempty"`
+ RunnerStatus RunnerStatus `json:"runner_status,omitempty"`
+ ProviderFault []byte `json:"provider_fault,omitempty"`
+ AgentID int64 `json:"-"`
+ CreateAttempt int `json:"-"`
+ TokenFetched *bool `json:"-"`
+ JitConfiguration map[string]string `json:"-"`
}
type UpdateUserParams struct {
- FullName string `json:"full_name"`
- Password string `json:"password"`
- Enabled *bool `json:"enabled"`
+ FullName string `json:"full_name,omitempty"`
+ Password string `json:"password,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
}
+// swagger:model PasswordLoginParams
// PasswordLoginParams holds information used during
// password authentication, that will be passed to a
// password login function
type PasswordLoginParams struct {
- Username string `json:"username"`
- Password string `json:"password"`
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
}
// Validate checks if the username and password are set
func (p PasswordLoginParams) Validate() error {
if p.Username == "" || p.Password == "" {
- return errors.ErrUnauthorized
+ return runnerErrors.ErrUnauthorized
}
return nil
}
-type UpdateRepositoryParams struct {
- CredentialsName string `json:"credentials_name"`
- WebhookSecret string `json:"webhook_secret"`
+// swagger:model UpdateEntityParams
+type UpdateEntityParams struct {
+ CredentialsName string `json:"credentials_name,omitempty"`
+ WebhookSecret string `json:"webhook_secret,omitempty"`
+ PoolBalancerType PoolBalancerType `json:"pool_balancer_type,omitempty"`
}
type InstanceUpdateMessage struct {
- Status common.RunnerStatus `json:"status"`
- Message string `json:"message"`
- AgentID *int64 `json:"agent_id"`
+ Status RunnerStatus `json:"status,omitempty"`
+ Message string `json:"message,omitempty"`
+ AgentID *int64 `json:"agent_id,omitempty"`
+}
+
+// swagger:model CreateGithubEndpointParams
+type CreateGithubEndpointParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ UploadBaseURL string `json:"upload_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (c CreateGithubEndpointParams) Validate() error {
+ if c.APIBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing api_base_url")
+ }
+
+ url, err := url.Parse(c.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.UploadBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing upload_base_url")
+ }
+
+ url, err = url.Parse(c.UploadBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid upload_base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.BaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing base_url")
+ }
+
+ url, err = url.Parse(c.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.CACertBundle != nil {
+ block, _ := pem.Decode(c.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGithubEndpointParams
+type UpdateGithubEndpointParams struct {
+ Description *string `json:"description,omitempty"`
+ APIBaseURL *string `json:"api_base_url,omitempty"`
+ UploadBaseURL *string `json:"upload_base_url,omitempty"`
+ BaseURL *string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (u UpdateGithubEndpointParams) Validate() error {
+ if u.APIBaseURL != nil {
+ url, err := url.Parse(*u.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.UploadBaseURL != nil {
+ url, err := url.Parse(*u.UploadBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid upload_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.BaseURL != nil {
+ url, err := url.Parse(*u.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.CACertBundle != nil {
+ block, _ := pem.Decode(u.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model GithubPAT
+type GithubPAT struct {
+ OAuth2Token string `json:"oauth2_token,omitempty"`
+}
+
+// swagger:model GithubApp
+type GithubApp struct {
+ AppID int64 `json:"app_id,omitempty"`
+ InstallationID int64 `json:"installation_id,omitempty"`
+ PrivateKeyBytes []byte `json:"private_key_bytes,omitempty"`
+}
+
+func (g GithubApp) Validate() error {
+ if g.AppID == 0 {
+ return runnerErrors.NewBadRequestError("missing app_id")
+ }
+
+ if g.InstallationID == 0 {
+ return runnerErrors.NewBadRequestError("missing installation_id")
+ }
+
+ if len(g.PrivateKeyBytes) == 0 {
+ return runnerErrors.NewBadRequestError("missing private_key_bytes")
+ }
+
+ block, _ := pem.Decode(g.PrivateKeyBytes)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid private_key_bytes")
+ }
+ // Parse the private key as PCKS1
+ _, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return fmt.Errorf("parsing private_key_path: %w", err)
+ }
+
+ return nil
+}
+
+// swagger:model CreateGithubCredentialsParams
+type CreateGithubCredentialsParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ AuthType ForgeAuthType `json:"auth_type,omitempty"`
+ PAT GithubPAT `json:"pat,omitempty"`
+ App GithubApp `json:"app,omitempty"`
+}
+
+func (c CreateGithubCredentialsParams) Validate() error {
+ if c.Name == "" {
+ return runnerErrors.NewBadRequestError("missing name")
+ }
+
+ if c.Endpoint == "" {
+ return runnerErrors.NewBadRequestError("missing endpoint")
+ }
+
+ switch c.AuthType {
+ case ForgeAuthTypePAT, ForgeAuthTypeApp:
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth_type")
+ }
+
+ if c.AuthType == ForgeAuthTypePAT {
+ if c.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ if c.AuthType == ForgeAuthTypeApp {
+ if err := c.App.Validate(); err != nil {
+ return fmt.Errorf("invalid app: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGithubCredentialsParams
+type UpdateGithubCredentialsParams struct {
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PAT *GithubPAT `json:"pat,omitempty"`
+ App *GithubApp `json:"app,omitempty"`
+}
+
+func (u UpdateGithubCredentialsParams) Validate() error {
+ if u.PAT != nil && u.App != nil {
+ return runnerErrors.NewBadRequestError("cannot update both PAT and App")
+ }
+
+ if u.PAT != nil {
+ if u.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ if u.App != nil {
+ if err := u.App.Validate(); err != nil {
+ return fmt.Errorf("invalid app: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateControllerParams
+type UpdateControllerParams struct {
+ MetadataURL *string `json:"metadata_url,omitempty"`
+ CallbackURL *string `json:"callback_url,omitempty"`
+ WebhookURL *string `json:"webhook_url,omitempty"`
+ MinimumJobAgeBackoff *uint `json:"minimum_job_age_backoff,omitempty"`
+}
+
+func (u UpdateControllerParams) Validate() error {
+ if u.MetadataURL != nil {
+ u, err := url.Parse(*u.MetadataURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid metadata_url")
+ }
+ }
+
+ if u.CallbackURL != nil {
+ u, err := url.Parse(*u.CallbackURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid callback_url")
+ }
+ }
+
+ if u.WebhookURL != nil {
+ u, err := url.Parse(*u.WebhookURL)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid webhook_url")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model CreateScaleSetParams
+type CreateScaleSetParams struct {
+ RunnerPrefix
+
+ Name string `json:"name"`
+ DisableUpdate bool `json:"disable_update"`
+ ScaleSetID int `json:"scale_set_id"`
+
+ ProviderName string `json:"provider_name,omitempty"`
+ MaxRunners uint `json:"max_runners,omitempty"`
+ MinIdleRunners uint `json:"min_idle_runners,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ RunnerBootstrapTimeout uint `json:"runner_bootstrap_timeout,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ // GithubRunnerGroup is the github runner group in which the runners of this
+ // pool will be added to.
+ // The runner group must be created by someone with access to the enterprise.
+ GitHubRunnerGroup string `json:"github-runner-group,omitempty"`
+}
+
+func (s *CreateScaleSetParams) Validate() error {
+ if s.ProviderName == "" {
+ return fmt.Errorf("missing provider")
+ }
+
+ if s.MinIdleRunners > s.MaxRunners {
+ return fmt.Errorf("min_idle_runners cannot be larger than max_runners")
+ }
+
+ if s.MaxRunners == 0 {
+ return fmt.Errorf("max_runners cannot be 0")
+ }
+
+ if s.Flavor == "" {
+ return fmt.Errorf("missing flavor")
+ }
+
+ if s.Image == "" {
+ return fmt.Errorf("missing image")
+ }
+
+ if s.Name == "" {
+ return fmt.Errorf("missing scale set name")
+ }
+
+ return nil
+}
+
+// swagger:model UpdateScaleSetParams
+type UpdateScaleSetParams struct {
+ RunnerPrefix
+
+ Name string `json:"name,omitempty"`
+ Enabled *bool `json:"enabled,omitempty"`
+ MaxRunners *uint `json:"max_runners,omitempty"`
+ MinIdleRunners *uint `json:"min_idle_runners,omitempty"`
+ RunnerBootstrapTimeout *uint `json:"runner_bootstrap_timeout,omitempty"`
+ Image string `json:"image,omitempty"`
+ Flavor string `json:"flavor,omitempty"`
+ OSType commonParams.OSType `json:"os_type,omitempty"`
+ OSArch commonParams.OSArch `json:"os_arch,omitempty"`
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+ // GithubRunnerGroup is the github runner group in which the runners of this
+ // pool will be added to.
+ // The runner group must be created by someone with access to the enterprise.
+ GitHubRunnerGroup *string `json:"runner_group,omitempty"`
+ State *ScaleSetState `json:"state"`
+ ExtendedState *string `json:"extended_state"`
+ ScaleSetID int `json:"-"`
+}
+
+// swagger:model CreateGiteaEndpointParams
+type CreateGiteaEndpointParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ APIBaseURL string `json:"api_base_url,omitempty"`
+ BaseURL string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (c CreateGiteaEndpointParams) Validate() error {
+ if c.APIBaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing api_base_url")
+ }
+
+ url, err := url.Parse(c.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.BaseURL == "" {
+ return runnerErrors.NewBadRequestError("missing base_url")
+ }
+
+ url, err = url.Parse(c.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+
+ if c.CACertBundle != nil {
+ block, _ := pem.Decode(c.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGiteaEndpointParams
+type UpdateGiteaEndpointParams struct {
+ Description *string `json:"description,omitempty"`
+ APIBaseURL *string `json:"api_base_url,omitempty"`
+ BaseURL *string `json:"base_url,omitempty"`
+ CACertBundle []byte `json:"ca_cert_bundle,omitempty"`
+}
+
+func (u UpdateGiteaEndpointParams) Validate() error {
+ if u.APIBaseURL != nil {
+ url, err := url.Parse(*u.APIBaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.BaseURL != nil {
+ url, err := url.Parse(*u.BaseURL)
+ if err != nil || url.Scheme == "" || url.Host == "" {
+ return runnerErrors.NewBadRequestError("invalid base_url")
+ }
+ switch url.Scheme {
+ case httpsScheme, httpScheme:
+ default:
+ return runnerErrors.NewBadRequestError("invalid api_base_url")
+ }
+ }
+
+ if u.CACertBundle != nil {
+ block, _ := pem.Decode(u.CACertBundle)
+ if block == nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ if _, err := x509.ParseCertificates(block.Bytes); err != nil {
+ return runnerErrors.NewBadRequestError("invalid ca_cert_bundle")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model CreateGiteaCredentialsParams
+type CreateGiteaCredentialsParams struct {
+ Name string `json:"name,omitempty"`
+ Description string `json:"description,omitempty"`
+ Endpoint string `json:"endpoint,omitempty"`
+ AuthType ForgeAuthType `json:"auth_type,omitempty"`
+ PAT GithubPAT `json:"pat,omitempty"`
+ App GithubApp `json:"app,omitempty"`
+}
+
+func (c CreateGiteaCredentialsParams) Validate() error {
+ if c.Name == "" {
+ return runnerErrors.NewBadRequestError("missing name")
+ }
+
+ if c.Endpoint == "" {
+ return runnerErrors.NewBadRequestError("missing endpoint")
+ }
+
+ switch c.AuthType {
+ case ForgeAuthTypePAT:
+ default:
+ return runnerErrors.NewBadRequestError("invalid auth_type: %s", c.AuthType)
+ }
+
+ if c.AuthType == ForgeAuthTypePAT {
+ if c.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ return nil
+}
+
+// swagger:model UpdateGiteaCredentialsParams
+type UpdateGiteaCredentialsParams struct {
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PAT *GithubPAT `json:"pat,omitempty"`
+}
+
+func (u UpdateGiteaCredentialsParams) Validate() error {
+ if u.PAT != nil {
+ if u.PAT.OAuth2Token == "" {
+ return runnerErrors.NewBadRequestError("missing oauth2_token")
+ }
+ }
+
+ return nil
}
diff --git a/runner/common.go b/runner/common.go
new file mode 100644
index 00000000..b1682c0c
--- /dev/null
+++ b/runner/common.go
@@ -0,0 +1,31 @@
+package runner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ResolveForgeCredentialByName(ctx context.Context, credentialsName string) (params.ForgeCredentials, error) {
+ githubCred, err := r.store.GetGithubCredentialsByName(ctx, credentialsName, false)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+ giteaCred, err := r.store.GetGiteaCredentialsByName(ctx, credentialsName, false)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return params.ForgeCredentials{}, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ if githubCred.ID != 0 && giteaCred.ID != 0 {
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("credentials %s are defined for both GitHub and Gitea, please specify the forge type", credentialsName)
+ }
+ if githubCred.ID != 0 {
+ return githubCred, nil
+ }
+ if giteaCred.ID != 0 {
+ return giteaCred, nil
+ }
+ return params.ForgeCredentials{}, runnerErrors.NewBadRequestError("credentials %s not found", credentialsName)
+}
diff --git a/runner/common/mocks/GithubClient.go b/runner/common/mocks/GithubClient.go
index 219b1ba0..c1dbeae9 100644
--- a/runner/common/mocks/GithubClient.go
+++ b/runner/common/mocks/GithubClient.go
@@ -1,12 +1,16 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
- github "github.com/google/go-github/v48/github"
+ github "github.com/google/go-github/v72/github"
mock "github.com/stretchr/testify/mock"
+
+ params "github.com/cloudbase/garm/params"
+
+ url "net/url"
)
// GithubClient is an autogenerated mock type for the GithubClient type
@@ -14,34 +18,105 @@ type GithubClient struct {
mock.Mock
}
-// CreateOrganizationRegistrationToken provides a mock function with given fields: ctx, owner
-func (_m *GithubClient) CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*github.RegistrationToken, *github.Response, error) {
- ret := _m.Called(ctx, owner)
+type GithubClient_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *GithubClient) EXPECT() *GithubClient_Expecter {
+ return &GithubClient_Expecter{mock: &_m.Mock}
+}
+
+// CreateEntityHook provides a mock function with given fields: ctx, hook
+func (_m *GithubClient) CreateEntityHook(ctx context.Context, hook *github.Hook) (*github.Hook, error) {
+ ret := _m.Called(ctx, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) (*github.Hook, error)); ok {
+ return rf(ctx, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.Hook) error); ok {
+ r1 = rf(ctx, hook)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_CreateEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityHook'
+type GithubClient_CreateEntityHook_Call struct {
+ *mock.Call
+}
+
+// CreateEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - hook *github.Hook
+func (_e *GithubClient_Expecter) CreateEntityHook(ctx interface{}, hook interface{}) *GithubClient_CreateEntityHook_Call {
+ return &GithubClient_CreateEntityHook_Call{Call: _e.mock.On("CreateEntityHook", ctx, hook)}
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) Run(run func(ctx context.Context, hook *github.Hook)) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.Hook))
+ })
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) Return(ret *github.Hook, err error) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityHook_Call) RunAndReturn(run func(context.Context, *github.Hook) (*github.Hook, error)) *GithubClient_CreateEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityRegistrationToken provides a mock function with given fields: ctx
+func (_m *GithubClient) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityRegistrationToken")
+ }
var r0 *github.RegistrationToken
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (*github.RegistrationToken, *github.Response, error)); ok {
- return rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RegistrationToken, *github.Response, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) *github.RegistrationToken); ok {
- r0 = rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RegistrationToken); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.RegistrationToken)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) *github.Response); ok {
- r1 = rf(ctx, owner)
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
- r2 = rf(ctx, owner)
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
} else {
r2 = ret.Error(2)
}
@@ -49,34 +124,229 @@ func (_m *GithubClient) CreateOrganizationRegistrationToken(ctx context.Context,
return r0, r1, r2
}
-// CreateRegistrationToken provides a mock function with given fields: ctx, owner, repo
-func (_m *GithubClient) CreateRegistrationToken(ctx context.Context, owner string, repo string) (*github.RegistrationToken, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo)
+// GithubClient_CreateEntityRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityRegistrationToken'
+type GithubClient_CreateEntityRegistrationToken_Call struct {
+ *mock.Call
+}
- var r0 *github.RegistrationToken
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) (*github.RegistrationToken, *github.Response, error)); ok {
- return rf(ctx, owner, repo)
+// CreateEntityRegistrationToken is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) CreateEntityRegistrationToken(ctx interface{}) *GithubClient_CreateEntityRegistrationToken_Call {
+ return &GithubClient_CreateEntityRegistrationToken_Call{Call: _e.mock.On("CreateEntityRegistrationToken", ctx)}
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) Run(run func(ctx context.Context)) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) Return(_a0 *github.RegistrationToken, _a1 *github.Response, _a2 error) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_CreateEntityRegistrationToken_Call) RunAndReturn(run func(context.Context) (*github.RegistrationToken, *github.Response, error)) *GithubClient_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) DeleteEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityHook")
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string) *github.RegistrationToken); ok {
- r0 = rf(ctx, owner, repo)
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.RegistrationToken)
+ r0 = ret.Get(0).(*github.Response)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string) *github.Response); ok {
- r1 = rf(ctx, owner, repo)
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_DeleteEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityHook'
+type GithubClient_DeleteEntityHook_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) DeleteEntityHook(ctx interface{}, id interface{}) *GithubClient_DeleteEntityHook_Call {
+ return &GithubClient_DeleteEntityHook_Call{Call: _e.mock.On("DeleteEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) Return(ret *github.Response, err error) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_DeleteEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubClient_DeleteEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntity provides a mock function with no fields
+func (_m *GithubClient) GetEntity() params.ForgeEntity {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntity")
+ }
+
+ var r0 params.ForgeEntity
+ if rf, ok := ret.Get(0).(func() params.ForgeEntity); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ return r0
+}
+
+// GithubClient_GetEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntity'
+type GithubClient_GetEntity_Call struct {
+ *mock.Call
+}
+
+// GetEntity is a helper method to define mock.On call
+func (_e *GithubClient_Expecter) GetEntity() *GithubClient_GetEntity_Call {
+ return &GithubClient_GetEntity_Call{Call: _e.mock.On("GetEntity")}
+}
+
+func (_c *GithubClient_GetEntity_Call) Run(run func()) *GithubClient_GetEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntity_Call) Return(_a0 params.ForgeEntity) *GithubClient_GetEntity_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_GetEntity_Call) RunAndReturn(run func() params.ForgeEntity) *GithubClient_GetEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) GetEntityHook(ctx context.Context, id int64) (*github.Hook, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Hook, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Hook); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_GetEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityHook'
+type GithubClient_GetEntityHook_Call struct {
+ *mock.Call
+}
+
+// GetEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) GetEntityHook(ctx interface{}, id interface{}) *GithubClient_GetEntityHook_Call {
+ return &GithubClient_GetEntityHook_Call{Call: _e.mock.On("GetEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_GetEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_GetEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityHook_Call) Return(ret *github.Hook, err error) *GithubClient_GetEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Hook, error)) *GithubClient_GetEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityJITConfig provides a mock function with given fields: ctx, instance, pool, labels
+func (_m *GithubClient) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (map[string]string, *github.Runner, error) {
+ ret := _m.Called(ctx, instance, pool, labels)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityJITConfig")
+ }
+
+ var r0 map[string]string
+ var r1 *github.Runner
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)); ok {
+ return rf(ctx, instance, pool, labels)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) map[string]string); ok {
+ r0 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[string]string)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.Pool, []string) *github.Runner); ok {
+ r1 = rf(ctx, instance, pool, labels)
} else {
if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
+ r1 = ret.Get(1).(*github.Runner)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok {
- r2 = rf(ctx, owner, repo)
+ if rf, ok := ret.Get(2).(func(context.Context, string, params.Pool, []string) error); ok {
+ r2 = rf(ctx, instance, pool, labels)
} else {
r2 = ret.Error(2)
}
@@ -84,10 +354,102 @@ func (_m *GithubClient) CreateRegistrationToken(ctx context.Context, owner strin
return r0, r1, r2
}
+// GithubClient_GetEntityJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityJITConfig'
+type GithubClient_GetEntityJITConfig_Call struct {
+ *mock.Call
+}
+
+// GetEntityJITConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - pool params.Pool
+// - labels []string
+func (_e *GithubClient_Expecter) GetEntityJITConfig(ctx interface{}, instance interface{}, pool interface{}, labels interface{}) *GithubClient_GetEntityJITConfig_Call {
+ return &GithubClient_GetEntityJITConfig_Call{Call: _e.mock.On("GetEntityJITConfig", ctx, instance, pool, labels)}
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) Run(run func(ctx context.Context, instance string, pool params.Pool, labels []string)) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.Pool), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) Return(jitConfigMap map[string]string, runner *github.Runner, err error) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Return(jitConfigMap, runner, err)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityJITConfig_Call) RunAndReturn(run func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)) *GithubClient_GetEntityJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityRunnerGroupIDByName provides a mock function with given fields: ctx, runnerGroupName
+func (_m *GithubClient) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+ ret := _m.Called(ctx, runnerGroupName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityRunnerGroupIDByName")
+ }
+
+ var r0 int64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
+ return rf(ctx, runnerGroupName)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
+ r0 = rf(ctx, runnerGroupName)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, runnerGroupName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubClient_GetEntityRunnerGroupIDByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityRunnerGroupIDByName'
+type GithubClient_GetEntityRunnerGroupIDByName_Call struct {
+ *mock.Call
+}
+
+// GetEntityRunnerGroupIDByName is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerGroupName string
+func (_e *GithubClient_Expecter) GetEntityRunnerGroupIDByName(ctx interface{}, runnerGroupName interface{}) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ return &GithubClient_GetEntityRunnerGroupIDByName_Call{Call: _e.mock.On("GetEntityRunnerGroupIDByName", ctx, runnerGroupName)}
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) Run(run func(ctx context.Context, runnerGroupName string)) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) Return(_a0 int64, _a1 error) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubClient_GetEntityRunnerGroupIDByName_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *GithubClient_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetWorkflowJobByID provides a mock function with given fields: ctx, owner, repo, jobID
func (_m *GithubClient) GetWorkflowJobByID(ctx context.Context, owner string, repo string, jobID int64) (*github.WorkflowJob, *github.Response, error) {
ret := _m.Called(ctx, owner, repo, jobID)
+ if len(ret) == 0 {
+ panic("no return value specified for GetWorkflowJobByID")
+ }
+
var r0 *github.WorkflowJob
var r1 *github.Response
var r2 error
@@ -119,34 +481,184 @@ func (_m *GithubClient) GetWorkflowJobByID(ctx context.Context, owner string, re
return r0, r1, r2
}
-// ListOrganizationRunnerApplicationDownloads provides a mock function with given fields: ctx, owner
-func (_m *GithubClient) ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
- ret := _m.Called(ctx, owner)
+// GithubClient_GetWorkflowJobByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWorkflowJobByID'
+type GithubClient_GetWorkflowJobByID_Call struct {
+ *mock.Call
+}
+
+// GetWorkflowJobByID is a helper method to define mock.On call
+// - ctx context.Context
+// - owner string
+// - repo string
+// - jobID int64
+func (_e *GithubClient_Expecter) GetWorkflowJobByID(ctx interface{}, owner interface{}, repo interface{}, jobID interface{}) *GithubClient_GetWorkflowJobByID_Call {
+ return &GithubClient_GetWorkflowJobByID_Call{Call: _e.mock.On("GetWorkflowJobByID", ctx, owner, repo, jobID)}
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) Run(run func(ctx context.Context, owner string, repo string, jobID int64)) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) Return(_a0 *github.WorkflowJob, _a1 *github.Response, _a2 error) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_GetWorkflowJobByID_Call) RunAndReturn(run func(context.Context, string, string, int64) (*github.WorkflowJob, *github.Response, error)) *GithubClient_GetWorkflowJobByID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubBaseURL provides a mock function with no fields
+func (_m *GithubClient) GithubBaseURL() *url.URL {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GithubBaseURL")
+ }
+
+ var r0 *url.URL
+ if rf, ok := ret.Get(0).(func() *url.URL); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*url.URL)
+ }
+ }
+
+ return r0
+}
+
+// GithubClient_GithubBaseURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubBaseURL'
+type GithubClient_GithubBaseURL_Call struct {
+ *mock.Call
+}
+
+// GithubBaseURL is a helper method to define mock.On call
+func (_e *GithubClient_Expecter) GithubBaseURL() *GithubClient_GithubBaseURL_Call {
+ return &GithubClient_GithubBaseURL_Call{Call: _e.mock.On("GithubBaseURL")}
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) Run(run func()) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) Return(_a0 *url.URL) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_GithubBaseURL_Call) RunAndReturn(run func() *url.URL) *GithubClient_GithubBaseURL_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityHooks provides a mock function with given fields: ctx, opts
+func (_m *GithubClient) ListEntityHooks(ctx context.Context, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubClient_ListEntityHooks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityHooks'
+type GithubClient_ListEntityHooks_Call struct {
+ *mock.Call
+}
+
+// ListEntityHooks is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListOptions
+func (_e *GithubClient_Expecter) ListEntityHooks(ctx interface{}, opts interface{}) *GithubClient_ListEntityHooks_Call {
+ return &GithubClient_ListEntityHooks_Call{Call: _e.mock.On("ListEntityHooks", ctx, opts)}
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) Run(run func(ctx context.Context, opts *github.ListOptions)) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListOptions))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) Return(ret []*github.Hook, response *github.Response, err error) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Return(ret, response, err)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityHooks_Call) RunAndReturn(run func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)) *GithubClient_ListEntityHooks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunnerApplicationDownloads provides a mock function with given fields: ctx
+func (_m *GithubClient) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunnerApplicationDownloads")
+ }
var r0 []*github.RunnerApplicationDownload
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
- return rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
+ return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []*github.RunnerApplicationDownload); ok {
- r0 = rf(ctx, owner)
+ if rf, ok := ret.Get(0).(func(context.Context) []*github.RunnerApplicationDownload); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) *github.Response); ok {
- r1 = rf(ctx, owner)
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
- r2 = rf(ctx, owner)
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
} else {
r2 = ret.Error(2)
}
@@ -154,34 +666,66 @@ func (_m *GithubClient) ListOrganizationRunnerApplicationDownloads(ctx context.C
return r0, r1, r2
}
-// ListOrganizationRunners provides a mock function with given fields: ctx, owner, opts
-func (_m *GithubClient) ListOrganizationRunners(ctx context.Context, owner string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
- ret := _m.Called(ctx, owner, opts)
+// GithubClient_ListEntityRunnerApplicationDownloads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunnerApplicationDownloads'
+type GithubClient_ListEntityRunnerApplicationDownloads_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunnerApplicationDownloads is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) ListEntityRunnerApplicationDownloads(ctx interface{}) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ return &GithubClient_ListEntityRunnerApplicationDownloads_Call{Call: _e.mock.On("ListEntityRunnerApplicationDownloads", ctx)}
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) Run(run func(ctx context.Context)) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) Return(_a0 []*github.RunnerApplicationDownload, _a1 *github.Response, _a2 error) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunnerApplicationDownloads_Call) RunAndReturn(run func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)) *GithubClient_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunners provides a mock function with given fields: ctx, opts
+func (_m *GithubClient) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunners")
+ }
var r0 *github.Runners
var r1 *github.Response
var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) (*github.Runners, *github.Response, error)); ok {
- return rf(ctx, owner, opts)
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)); ok {
+ return rf(ctx, opts)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) *github.Runners); ok {
- r0 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) *github.Runners); ok {
+ r0 = rf(ctx, opts)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.Runners)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListOptions) *github.Response); ok {
- r1 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListRunnersOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(*github.Response)
}
}
- if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListOptions) error); ok {
- r2 = rf(ctx, owner, opts)
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListRunnersOptions) error); ok {
+ r2 = rf(ctx, opts)
} else {
r2 = ret.Error(2)
}
@@ -189,95 +733,58 @@ func (_m *GithubClient) ListOrganizationRunners(ctx context.Context, owner strin
return r0, r1, r2
}
-// ListRunnerApplicationDownloads provides a mock function with given fields: ctx, owner, repo
-func (_m *GithubClient) ListRunnerApplicationDownloads(ctx context.Context, owner string, repo string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo)
-
- var r0 []*github.RunnerApplicationDownload
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
- return rf(ctx, owner, repo)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string) []*github.RunnerApplicationDownload); ok {
- r0 = rf(ctx, owner, repo)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, string) *github.Response); ok {
- r1 = rf(ctx, owner, repo)
- } else {
- if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
- }
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok {
- r2 = rf(ctx, owner, repo)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+// GithubClient_ListEntityRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunners'
+type GithubClient_ListEntityRunners_Call struct {
+ *mock.Call
}
-// ListRunners provides a mock function with given fields: ctx, owner, repo, opts
-func (_m *GithubClient) ListRunners(ctx context.Context, owner string, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
- ret := _m.Called(ctx, owner, repo, opts)
-
- var r0 *github.Runners
- var r1 *github.Response
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) (*github.Runners, *github.Response, error)); ok {
- return rf(ctx, owner, repo, opts)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) *github.Runners); ok {
- r0 = rf(ctx, owner, repo, opts)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.Runners)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.ListOptions) *github.Response); ok {
- r1 = rf(ctx, owner, repo, opts)
- } else {
- if ret.Get(1) != nil {
- r1 = ret.Get(1).(*github.Response)
- }
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.ListOptions) error); ok {
- r2 = rf(ctx, owner, repo, opts)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+// ListEntityRunners is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListRunnersOptions
+func (_e *GithubClient_Expecter) ListEntityRunners(ctx interface{}, opts interface{}) *GithubClient_ListEntityRunners_Call {
+ return &GithubClient_ListEntityRunners_Call{Call: _e.mock.On("ListEntityRunners", ctx, opts)}
}
-// RemoveOrganizationRunner provides a mock function with given fields: ctx, owner, runnerID
-func (_m *GithubClient) RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*github.Response, error) {
- ret := _m.Called(ctx, owner, runnerID)
+func (_c *GithubClient_ListEntityRunners_Call) Run(run func(ctx context.Context, opts *github.ListRunnersOptions)) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListRunnersOptions))
+ })
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunners_Call) Return(_a0 *github.Runners, _a1 *github.Response, _a2 error) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubClient_ListEntityRunners_Call) RunAndReturn(run func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)) *GithubClient_ListEntityRunners_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// PingEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubClient) PingEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingEntityHook")
+ }
var r0 *github.Response
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
- return rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
- r0 = rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*github.Response)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
- r1 = rf(ctx, owner, runnerID)
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
@@ -285,25 +792,58 @@ func (_m *GithubClient) RemoveOrganizationRunner(ctx context.Context, owner stri
return r0, r1
}
-// RemoveRunner provides a mock function with given fields: ctx, owner, repo, runnerID
-func (_m *GithubClient) RemoveRunner(ctx context.Context, owner string, repo string, runnerID int64) (*github.Response, error) {
- ret := _m.Called(ctx, owner, repo, runnerID)
+// GithubClient_PingEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PingEntityHook'
+type GithubClient_PingEntityHook_Call struct {
+ *mock.Call
+}
- var r0 *github.Response
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
- return rf(ctx, owner, repo, runnerID)
+// PingEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubClient_Expecter) PingEntityHook(ctx interface{}, id interface{}) *GithubClient_PingEntityHook_Call {
+ return &GithubClient_PingEntityHook_Call{Call: _e.mock.On("PingEntityHook", ctx, id)}
+}
+
+func (_c *GithubClient_PingEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubClient_PingEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_PingEntityHook_Call) Return(ret *github.Response, err error) *GithubClient_PingEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubClient_PingEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubClient_PingEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *GithubClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
}
- if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
- r0 = rf(ctx, owner, repo, runnerID)
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*github.Response)
+ r0 = ret.Get(0).(*github.RateLimits)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
- r1 = rf(ctx, owner, repo, runnerID)
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
@@ -311,13 +851,87 @@ func (_m *GithubClient) RemoveRunner(ctx context.Context, owner string, repo str
return r0, r1
}
-type mockConstructorTestingTNewGithubClient interface {
- mock.TestingT
- Cleanup(func())
+// GithubClient_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type GithubClient_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubClient_Expecter) RateLimit(ctx interface{}) *GithubClient_RateLimit_Call {
+ return &GithubClient_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *GithubClient_RateLimit_Call) Run(run func(ctx context.Context)) *GithubClient_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubClient_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *GithubClient_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubClient_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *GithubClient_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveEntityRunner provides a mock function with given fields: ctx, runnerID
+func (_m *GithubClient) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+ ret := _m.Called(ctx, runnerID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveEntityRunner")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, runnerID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// GithubClient_RemoveEntityRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveEntityRunner'
+type GithubClient_RemoveEntityRunner_Call struct {
+ *mock.Call
+}
+
+// RemoveEntityRunner is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerID int64
+func (_e *GithubClient_Expecter) RemoveEntityRunner(ctx interface{}, runnerID interface{}) *GithubClient_RemoveEntityRunner_Call {
+ return &GithubClient_RemoveEntityRunner_Call{Call: _e.mock.On("RemoveEntityRunner", ctx, runnerID)}
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) Run(run func(ctx context.Context, runnerID int64)) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) Return(_a0 error) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubClient_RemoveEntityRunner_Call) RunAndReturn(run func(context.Context, int64) error) *GithubClient_RemoveEntityRunner_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewGithubClient creates a new instance of GithubClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewGithubClient(t mockConstructorTestingTNewGithubClient) *GithubClient {
+// The first argument is typically a *testing.T value.
+func NewGithubClient(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *GithubClient {
mock := &GithubClient{}
mock.Mock.Test(t)
diff --git a/runner/common/mocks/GithubEnterpriseClient.go b/runner/common/mocks/GithubEnterpriseClient.go
index 00a8dc50..5606e340 100644
--- a/runner/common/mocks/GithubEnterpriseClient.go
+++ b/runner/common/mocks/GithubEnterpriseClient.go
@@ -1,11 +1,11 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery v2.42.0. DO NOT EDIT.
package mocks
import (
context "context"
- github "github.com/google/go-github/v48/github"
+ github "github.com/google/go-github/v72/github"
mock "github.com/stretchr/testify/mock"
)
@@ -18,6 +18,10 @@ type GithubEnterpriseClient struct {
func (_m *GithubEnterpriseClient) CreateRegistrationToken(ctx context.Context, enterprise string) (*github.RegistrationToken, *github.Response, error) {
ret := _m.Called(ctx, enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRegistrationToken")
+ }
+
var r0 *github.RegistrationToken
var r1 *github.Response
var r2 error
@@ -49,10 +53,53 @@ func (_m *GithubEnterpriseClient) CreateRegistrationToken(ctx context.Context, e
return r0, r1, r2
}
+// GenerateEnterpriseJITConfig provides a mock function with given fields: ctx, enterprise, request
+func (_m *GithubEnterpriseClient) GenerateEnterpriseJITConfig(ctx context.Context, enterprise string, request *github.GenerateJITConfigRequest) (*github.JITRunnerConfig, *github.Response, error) {
+ ret := _m.Called(ctx, enterprise, request)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GenerateEnterpriseJITConfig")
+ }
+
+ var r0 *github.JITRunnerConfig
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.GenerateJITConfigRequest) (*github.JITRunnerConfig, *github.Response, error)); ok {
+ return rf(ctx, enterprise, request)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.GenerateJITConfigRequest) *github.JITRunnerConfig); ok {
+ r0 = rf(ctx, enterprise, request)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.JITRunnerConfig)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.GenerateJITConfigRequest) *github.Response); ok {
+ r1 = rf(ctx, enterprise, request)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.GenerateJITConfigRequest) error); ok {
+ r2 = rf(ctx, enterprise, request)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
// ListRunnerApplicationDownloads provides a mock function with given fields: ctx, enterprise
func (_m *GithubEnterpriseClient) ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*github.RunnerApplicationDownload, *github.Response, error) {
ret := _m.Called(ctx, enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunnerApplicationDownloads")
+ }
+
var r0 []*github.RunnerApplicationDownload
var r1 *github.Response
var r2 error
@@ -84,10 +131,53 @@ func (_m *GithubEnterpriseClient) ListRunnerApplicationDownloads(ctx context.Con
return r0, r1, r2
}
+// ListRunnerGroups provides a mock function with given fields: ctx, enterprise, opts
+func (_m *GithubEnterpriseClient) ListRunnerGroups(ctx context.Context, enterprise string, opts *github.ListEnterpriseRunnerGroupOptions) (*github.EnterpriseRunnerGroups, *github.Response, error) {
+ ret := _m.Called(ctx, enterprise, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunnerGroups")
+ }
+
+ var r0 *github.EnterpriseRunnerGroups
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) (*github.EnterpriseRunnerGroups, *github.Response, error)); ok {
+ return rf(ctx, enterprise, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) *github.EnterpriseRunnerGroups); ok {
+ r0 = rf(ctx, enterprise, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.EnterpriseRunnerGroups)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) *github.Response); ok {
+ r1 = rf(ctx, enterprise, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListEnterpriseRunnerGroupOptions) error); ok {
+ r2 = rf(ctx, enterprise, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
// ListRunners provides a mock function with given fields: ctx, enterprise, opts
func (_m *GithubEnterpriseClient) ListRunners(ctx context.Context, enterprise string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
ret := _m.Called(ctx, enterprise, opts)
+ if len(ret) == 0 {
+ panic("no return value specified for ListRunners")
+ }
+
var r0 *github.Runners
var r1 *github.Response
var r2 error
@@ -123,6 +213,10 @@ func (_m *GithubEnterpriseClient) ListRunners(ctx context.Context, enterprise st
func (_m *GithubEnterpriseClient) RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*github.Response, error) {
ret := _m.Called(ctx, enterprise, runnerID)
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveRunner")
+ }
+
var r0 *github.Response
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
@@ -145,13 +239,12 @@ func (_m *GithubEnterpriseClient) RemoveRunner(ctx context.Context, enterprise s
return r0, r1
}
-type mockConstructorTestingTNewGithubEnterpriseClient interface {
+// NewGithubEnterpriseClient creates a new instance of GithubEnterpriseClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewGithubEnterpriseClient(t interface {
mock.TestingT
Cleanup(func())
-}
-
-// NewGithubEnterpriseClient creates a new instance of GithubEnterpriseClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewGithubEnterpriseClient(t mockConstructorTestingTNewGithubEnterpriseClient) *GithubEnterpriseClient {
+}) *GithubEnterpriseClient {
mock := &GithubEnterpriseClient{}
mock.Mock.Test(t)
diff --git a/runner/common/mocks/GithubEntityOperations.go b/runner/common/mocks/GithubEntityOperations.go
new file mode 100644
index 00000000..0b3c3f83
--- /dev/null
+++ b/runner/common/mocks/GithubEntityOperations.go
@@ -0,0 +1,871 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+
+ params "github.com/cloudbase/garm/params"
+
+ url "net/url"
+)
+
+// GithubEntityOperations is an autogenerated mock type for the GithubEntityOperations type
+type GithubEntityOperations struct {
+ mock.Mock
+}
+
+type GithubEntityOperations_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *GithubEntityOperations) EXPECT() *GithubEntityOperations_Expecter {
+ return &GithubEntityOperations_Expecter{mock: &_m.Mock}
+}
+
+// CreateEntityHook provides a mock function with given fields: ctx, hook
+func (_m *GithubEntityOperations) CreateEntityHook(ctx context.Context, hook *github.Hook) (*github.Hook, error) {
+ ret := _m.Called(ctx, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) (*github.Hook, error)); ok {
+ return rf(ctx, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.Hook) error); ok {
+ r1 = rf(ctx, hook)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_CreateEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityHook'
+type GithubEntityOperations_CreateEntityHook_Call struct {
+ *mock.Call
+}
+
+// CreateEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - hook *github.Hook
+func (_e *GithubEntityOperations_Expecter) CreateEntityHook(ctx interface{}, hook interface{}) *GithubEntityOperations_CreateEntityHook_Call {
+ return &GithubEntityOperations_CreateEntityHook_Call{Call: _e.mock.On("CreateEntityHook", ctx, hook)}
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) Run(run func(ctx context.Context, hook *github.Hook)) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.Hook))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) Return(ret *github.Hook, err error) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityHook_Call) RunAndReturn(run func(context.Context, *github.Hook) (*github.Hook, error)) *GithubEntityOperations_CreateEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateEntityRegistrationToken provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEntityRegistrationToken")
+ }
+
+ var r0 *github.RegistrationToken
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RegistrationToken, *github.Response, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RegistrationToken); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RegistrationToken)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_CreateEntityRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEntityRegistrationToken'
+type GithubEntityOperations_CreateEntityRegistrationToken_Call struct {
+ *mock.Call
+}
+
+// CreateEntityRegistrationToken is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) CreateEntityRegistrationToken(ctx interface{}) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ return &GithubEntityOperations_CreateEntityRegistrationToken_Call{Call: _e.mock.On("CreateEntityRegistrationToken", ctx)}
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) Return(_a0 *github.RegistrationToken, _a1 *github.Response, _a2 error) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_CreateEntityRegistrationToken_Call) RunAndReturn(run func(context.Context) (*github.RegistrationToken, *github.Response, error)) *GithubEntityOperations_CreateEntityRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) DeleteEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEntityHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_DeleteEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntityHook'
+type GithubEntityOperations_DeleteEntityHook_Call struct {
+ *mock.Call
+}
+
+// DeleteEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) DeleteEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_DeleteEntityHook_Call {
+ return &GithubEntityOperations_DeleteEntityHook_Call{Call: _e.mock.On("DeleteEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) Return(ret *github.Response, err error) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_DeleteEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubEntityOperations_DeleteEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntity provides a mock function with no fields
+func (_m *GithubEntityOperations) GetEntity() params.ForgeEntity {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntity")
+ }
+
+ var r0 params.ForgeEntity
+ if rf, ok := ret.Get(0).(func() params.ForgeEntity); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.ForgeEntity)
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_GetEntity_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntity'
+type GithubEntityOperations_GetEntity_Call struct {
+ *mock.Call
+}
+
+// GetEntity is a helper method to define mock.On call
+func (_e *GithubEntityOperations_Expecter) GetEntity() *GithubEntityOperations_GetEntity_Call {
+ return &GithubEntityOperations_GetEntity_Call{Call: _e.mock.On("GetEntity")}
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) Run(run func()) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) Return(_a0 params.ForgeEntity) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntity_Call) RunAndReturn(run func() params.ForgeEntity) *GithubEntityOperations_GetEntity_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) GetEntityHook(ctx context.Context, id int64) (*github.Hook, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityHook")
+ }
+
+ var r0 *github.Hook
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Hook, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Hook); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_GetEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityHook'
+type GithubEntityOperations_GetEntityHook_Call struct {
+ *mock.Call
+}
+
+// GetEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) GetEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_GetEntityHook_Call {
+ return &GithubEntityOperations_GetEntityHook_Call{Call: _e.mock.On("GetEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) Return(ret *github.Hook, err error) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Hook, error)) *GithubEntityOperations_GetEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityJITConfig provides a mock function with given fields: ctx, instance, pool, labels
+func (_m *GithubEntityOperations) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (map[string]string, *github.Runner, error) {
+ ret := _m.Called(ctx, instance, pool, labels)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityJITConfig")
+ }
+
+ var r0 map[string]string
+ var r1 *github.Runner
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)); ok {
+ return rf(ctx, instance, pool, labels)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, params.Pool, []string) map[string]string); ok {
+ r0 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(map[string]string)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, params.Pool, []string) *github.Runner); ok {
+ r1 = rf(ctx, instance, pool, labels)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Runner)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, params.Pool, []string) error); ok {
+ r2 = rf(ctx, instance, pool, labels)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_GetEntityJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityJITConfig'
+type GithubEntityOperations_GetEntityJITConfig_Call struct {
+ *mock.Call
+}
+
+// GetEntityJITConfig is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - pool params.Pool
+// - labels []string
+func (_e *GithubEntityOperations_Expecter) GetEntityJITConfig(ctx interface{}, instance interface{}, pool interface{}, labels interface{}) *GithubEntityOperations_GetEntityJITConfig_Call {
+ return &GithubEntityOperations_GetEntityJITConfig_Call{Call: _e.mock.On("GetEntityJITConfig", ctx, instance, pool, labels)}
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) Run(run func(ctx context.Context, instance string, pool params.Pool, labels []string)) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(params.Pool), args[3].([]string))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) Return(jitConfigMap map[string]string, runner *github.Runner, err error) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Return(jitConfigMap, runner, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityJITConfig_Call) RunAndReturn(run func(context.Context, string, params.Pool, []string) (map[string]string, *github.Runner, error)) *GithubEntityOperations_GetEntityJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEntityRunnerGroupIDByName provides a mock function with given fields: ctx, runnerGroupName
+func (_m *GithubEntityOperations) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+ ret := _m.Called(ctx, runnerGroupName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetEntityRunnerGroupIDByName")
+ }
+
+ var r0 int64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int64, error)); ok {
+ return rf(ctx, runnerGroupName)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
+ r0 = rf(ctx, runnerGroupName)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, runnerGroupName)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_GetEntityRunnerGroupIDByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntityRunnerGroupIDByName'
+type GithubEntityOperations_GetEntityRunnerGroupIDByName_Call struct {
+ *mock.Call
+}
+
+// GetEntityRunnerGroupIDByName is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerGroupName string
+func (_e *GithubEntityOperations_Expecter) GetEntityRunnerGroupIDByName(ctx interface{}, runnerGroupName interface{}) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ return &GithubEntityOperations_GetEntityRunnerGroupIDByName_Call{Call: _e.mock.On("GetEntityRunnerGroupIDByName", ctx, runnerGroupName)}
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) Run(run func(ctx context.Context, runnerGroupName string)) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) Return(_a0 int64, _a1 error) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call) RunAndReturn(run func(context.Context, string) (int64, error)) *GithubEntityOperations_GetEntityRunnerGroupIDByName_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubBaseURL provides a mock function with no fields
+func (_m *GithubEntityOperations) GithubBaseURL() *url.URL {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GithubBaseURL")
+ }
+
+ var r0 *url.URL
+ if rf, ok := ret.Get(0).(func() *url.URL); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*url.URL)
+ }
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_GithubBaseURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubBaseURL'
+type GithubEntityOperations_GithubBaseURL_Call struct {
+ *mock.Call
+}
+
+// GithubBaseURL is a helper method to define mock.On call
+func (_e *GithubEntityOperations_Expecter) GithubBaseURL() *GithubEntityOperations_GithubBaseURL_Call {
+ return &GithubEntityOperations_GithubBaseURL_Call{Call: _e.mock.On("GithubBaseURL")}
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) Run(run func()) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) Return(_a0 *url.URL) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_GithubBaseURL_Call) RunAndReturn(run func() *url.URL) *GithubEntityOperations_GithubBaseURL_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityHooks provides a mock function with given fields: ctx, opts
+func (_m *GithubEntityOperations) ListEntityHooks(ctx context.Context, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityHooks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityHooks'
+type GithubEntityOperations_ListEntityHooks_Call struct {
+ *mock.Call
+}
+
+// ListEntityHooks is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListOptions
+func (_e *GithubEntityOperations_Expecter) ListEntityHooks(ctx interface{}, opts interface{}) *GithubEntityOperations_ListEntityHooks_Call {
+ return &GithubEntityOperations_ListEntityHooks_Call{Call: _e.mock.On("ListEntityHooks", ctx, opts)}
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) Run(run func(ctx context.Context, opts *github.ListOptions)) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListOptions))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) Return(ret []*github.Hook, response *github.Response, err error) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Return(ret, response, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityHooks_Call) RunAndReturn(run func(context.Context, *github.ListOptions) ([]*github.Hook, *github.Response, error)) *GithubEntityOperations_ListEntityHooks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunnerApplicationDownloads provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunnerApplicationDownloads")
+ }
+
+ var r0 []*github.RunnerApplicationDownload
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) []*github.RunnerApplicationDownload); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.RunnerApplicationDownload)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) *github.Response); ok {
+ r1 = rf(ctx)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context) error); ok {
+ r2 = rf(ctx)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunnerApplicationDownloads'
+type GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunnerApplicationDownloads is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) ListEntityRunnerApplicationDownloads(ctx interface{}) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ return &GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call{Call: _e.mock.On("ListEntityRunnerApplicationDownloads", ctx)}
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) Return(_a0 []*github.RunnerApplicationDownload, _a1 *github.Response, _a2 error) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call) RunAndReturn(run func(context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)) *GithubEntityOperations_ListEntityRunnerApplicationDownloads_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListEntityRunners provides a mock function with given fields: ctx, opts
+func (_m *GithubEntityOperations) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ ret := _m.Called(ctx, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListEntityRunners")
+ }
+
+ var r0 *github.Runners
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)); ok {
+ return rf(ctx, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *github.ListRunnersOptions) *github.Runners); ok {
+ r0 = rf(ctx, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Runners)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *github.ListRunnersOptions) *github.Response); ok {
+ r1 = rf(ctx, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, *github.ListRunnersOptions) error); ok {
+ r2 = rf(ctx, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// GithubEntityOperations_ListEntityRunners_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntityRunners'
+type GithubEntityOperations_ListEntityRunners_Call struct {
+ *mock.Call
+}
+
+// ListEntityRunners is a helper method to define mock.On call
+// - ctx context.Context
+// - opts *github.ListRunnersOptions
+func (_e *GithubEntityOperations_Expecter) ListEntityRunners(ctx interface{}, opts interface{}) *GithubEntityOperations_ListEntityRunners_Call {
+ return &GithubEntityOperations_ListEntityRunners_Call{Call: _e.mock.On("ListEntityRunners", ctx, opts)}
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) Run(run func(ctx context.Context, opts *github.ListRunnersOptions)) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*github.ListRunnersOptions))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) Return(_a0 *github.Runners, _a1 *github.Response, _a2 error) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Return(_a0, _a1, _a2)
+ return _c
+}
+
+func (_c *GithubEntityOperations_ListEntityRunners_Call) RunAndReturn(run func(context.Context, *github.ListRunnersOptions) (*github.Runners, *github.Response, error)) *GithubEntityOperations_ListEntityRunners_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// PingEntityHook provides a mock function with given fields: ctx, id
+func (_m *GithubEntityOperations) PingEntityHook(ctx context.Context, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingEntityHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) (*github.Response, error)); ok {
+ return rf(ctx, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64) *github.Response); ok {
+ r0 = rf(ctx, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
+ r1 = rf(ctx, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_PingEntityHook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PingEntityHook'
+type GithubEntityOperations_PingEntityHook_Call struct {
+ *mock.Call
+}
+
+// PingEntityHook is a helper method to define mock.On call
+// - ctx context.Context
+// - id int64
+func (_e *GithubEntityOperations_Expecter) PingEntityHook(ctx interface{}, id interface{}) *GithubEntityOperations_PingEntityHook_Call {
+ return &GithubEntityOperations_PingEntityHook_Call{Call: _e.mock.On("PingEntityHook", ctx, id)}
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) Run(run func(ctx context.Context, id int64)) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) Return(ret *github.Response, err error) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Return(ret, err)
+ return _c
+}
+
+func (_c *GithubEntityOperations_PingEntityHook_Call) RunAndReturn(run func(context.Context, int64) (*github.Response, error)) *GithubEntityOperations_PingEntityHook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *GithubEntityOperations) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
+ }
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RateLimits)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GithubEntityOperations_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type GithubEntityOperations_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *GithubEntityOperations_Expecter) RateLimit(ctx interface{}) *GithubEntityOperations_RateLimit_Call {
+ return &GithubEntityOperations_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) Run(run func(ctx context.Context)) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *GithubEntityOperations_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *GithubEntityOperations_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveEntityRunner provides a mock function with given fields: ctx, runnerID
+func (_m *GithubEntityOperations) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+ ret := _m.Called(ctx, runnerID)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveEntityRunner")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
+ r0 = rf(ctx, runnerID)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// GithubEntityOperations_RemoveEntityRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveEntityRunner'
+type GithubEntityOperations_RemoveEntityRunner_Call struct {
+ *mock.Call
+}
+
+// RemoveEntityRunner is a helper method to define mock.On call
+// - ctx context.Context
+// - runnerID int64
+func (_e *GithubEntityOperations_Expecter) RemoveEntityRunner(ctx interface{}, runnerID interface{}) *GithubEntityOperations_RemoveEntityRunner_Call {
+ return &GithubEntityOperations_RemoveEntityRunner_Call{Call: _e.mock.On("RemoveEntityRunner", ctx, runnerID)}
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) Run(run func(ctx context.Context, runnerID int64)) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(int64))
+ })
+ return _c
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) Return(_a0 error) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *GithubEntityOperations_RemoveEntityRunner_Call) RunAndReturn(run func(context.Context, int64) error) *GithubEntityOperations_RemoveEntityRunner_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewGithubEntityOperations creates a new instance of GithubEntityOperations. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewGithubEntityOperations(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *GithubEntityOperations {
+ mock := &GithubEntityOperations{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/runner/common/mocks/OrganizationHooks.go b/runner/common/mocks/OrganizationHooks.go
new file mode 100644
index 00000000..73528638
--- /dev/null
+++ b/runner/common/mocks/OrganizationHooks.go
@@ -0,0 +1,206 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// OrganizationHooks is an autogenerated mock type for the OrganizationHooks type
+type OrganizationHooks struct {
+ mock.Mock
+}
+
+// CreateOrgHook provides a mock function with given fields: ctx, org, hook
+func (_m *OrganizationHooks) CreateOrgHook(ctx context.Context, org string, hook *github.Hook) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrgHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.Hook) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, org, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.Hook) *github.Response); ok {
+ r1 = rf(ctx, org, hook)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.Hook) error); ok {
+ r2 = rf(ctx, org, hook)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// DeleteOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) DeleteOrgHook(ctx context.Context, org string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrgHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) GetOrgHook(ctx context.Context, org string, id int64) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Hook); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) *github.Response); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, int64) error); ok {
+ r2 = rf(ctx, org, id)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// ListOrgHooks provides a mock function with given fields: ctx, org, opts
+func (_m *OrganizationHooks) ListOrgHooks(ctx context.Context, org string, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, org, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListOrgHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, org, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, org, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, org, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, *github.ListOptions) error); ok {
+ r2 = rf(ctx, org, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// PingOrgHook provides a mock function with given fields: ctx, org, id
+func (_m *OrganizationHooks) PingOrgHook(ctx context.Context, org string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, org, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingOrgHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, org, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64) *github.Response); ok {
+ r0 = rf(ctx, org, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
+ r1 = rf(ctx, org, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewOrganizationHooks creates a new instance of OrganizationHooks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewOrganizationHooks(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *OrganizationHooks {
+ mock := &OrganizationHooks{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/runner/common/mocks/PoolManager.go b/runner/common/mocks/PoolManager.go
index 3f6a0594..a1a62f4f 100644
--- a/runner/common/mocks/PoolManager.go
+++ b/runner/common/mocks/PoolManager.go
@@ -1,8 +1,10 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
+ context "context"
+
params "github.com/cloudbase/garm/params"
mock "github.com/stretchr/testify/mock"
)
@@ -12,24 +14,78 @@ type PoolManager struct {
mock.Mock
}
-// ForceDeleteRunner provides a mock function with given fields: runner
-func (_m *PoolManager) ForceDeleteRunner(runner params.Instance) error {
- ret := _m.Called(runner)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(params.Instance) error); ok {
- r0 = rf(runner)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+type PoolManager_Expecter struct {
+ mock *mock.Mock
}
-// GithubRunnerRegistrationToken provides a mock function with given fields:
+func (_m *PoolManager) EXPECT() *PoolManager_Expecter {
+ return &PoolManager_Expecter{mock: &_m.Mock}
+}
+
+// GetWebhookInfo provides a mock function with given fields: ctx
+func (_m *PoolManager) GetWebhookInfo(ctx context.Context) (params.HookInfo, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetWebhookInfo")
+ }
+
+ var r0 params.HookInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (params.HookInfo, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) params.HookInfo); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(params.HookInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_GetWebhookInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWebhookInfo'
+type PoolManager_GetWebhookInfo_Call struct {
+ *mock.Call
+}
+
+// GetWebhookInfo is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PoolManager_Expecter) GetWebhookInfo(ctx interface{}) *PoolManager_GetWebhookInfo_Call {
+ return &PoolManager_GetWebhookInfo_Call{Call: _e.mock.On("GetWebhookInfo", ctx)}
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) Run(run func(ctx context.Context)) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) Return(_a0 params.HookInfo, _a1 error) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_GetWebhookInfo_Call) RunAndReturn(run func(context.Context) (params.HookInfo, error)) *PoolManager_GetWebhookInfo_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GithubRunnerRegistrationToken provides a mock function with no fields
func (_m *PoolManager) GithubRunnerRegistrationToken() (string, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GithubRunnerRegistrationToken")
+ }
+
var r0 string
var r1 error
if rf, ok := ret.Get(0).(func() (string, error)); ok {
@@ -50,10 +106,41 @@ func (_m *PoolManager) GithubRunnerRegistrationToken() (string, error) {
return r0, r1
}
+// PoolManager_GithubRunnerRegistrationToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GithubRunnerRegistrationToken'
+type PoolManager_GithubRunnerRegistrationToken_Call struct {
+ *mock.Call
+}
+
+// GithubRunnerRegistrationToken is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) GithubRunnerRegistrationToken() *PoolManager_GithubRunnerRegistrationToken_Call {
+ return &PoolManager_GithubRunnerRegistrationToken_Call{Call: _e.mock.On("GithubRunnerRegistrationToken")}
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) Run(run func()) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) Return(_a0 string, _a1 error) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_GithubRunnerRegistrationToken_Call) RunAndReturn(run func() (string, error)) *PoolManager_GithubRunnerRegistrationToken_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// HandleWorkflowJob provides a mock function with given fields: job
func (_m *PoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
ret := _m.Called(job)
+ if len(ret) == 0 {
+ panic("no return value specified for HandleWorkflowJob")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.WorkflowJob) error); ok {
r0 = rf(job)
@@ -64,10 +151,42 @@ func (_m *PoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
return r0
}
-// ID provides a mock function with given fields:
+// PoolManager_HandleWorkflowJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleWorkflowJob'
+type PoolManager_HandleWorkflowJob_Call struct {
+ *mock.Call
+}
+
+// HandleWorkflowJob is a helper method to define mock.On call
+// - job params.WorkflowJob
+func (_e *PoolManager_Expecter) HandleWorkflowJob(job interface{}) *PoolManager_HandleWorkflowJob_Call {
+ return &PoolManager_HandleWorkflowJob_Call{Call: _e.mock.On("HandleWorkflowJob", job)}
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) Run(run func(job params.WorkflowJob)) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.WorkflowJob))
+ })
+ return _c
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) Return(_a0 error) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_HandleWorkflowJob_Call) RunAndReturn(run func(params.WorkflowJob) error) *PoolManager_HandleWorkflowJob_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ID provides a mock function with no fields
func (_m *PoolManager) ID() string {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ID")
+ }
+
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
@@ -78,24 +197,187 @@ func (_m *PoolManager) ID() string {
return r0
}
-// RefreshState provides a mock function with given fields: param
-func (_m *PoolManager) RefreshState(param params.UpdatePoolStateParams) error {
- ret := _m.Called(param)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(params.UpdatePoolStateParams) error); ok {
- r0 = rf(param)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
+// PoolManager_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID'
+type PoolManager_ID_Call struct {
+ *mock.Call
}
-// Start provides a mock function with given fields:
+// ID is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) ID() *PoolManager_ID_Call {
+ return &PoolManager_ID_Call{Call: _e.mock.On("ID")}
+}
+
+func (_c *PoolManager_ID_Call) Run(run func()) *PoolManager_ID_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_ID_Call) Return(_a0 string) *PoolManager_ID_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_ID_Call) RunAndReturn(run func() string) *PoolManager_ID_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InstallWebhook provides a mock function with given fields: ctx, param
+func (_m *PoolManager) InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error) {
+ ret := _m.Called(ctx, param)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InstallWebhook")
+ }
+
+ var r0 params.HookInfo
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, params.InstallWebhookParams) (params.HookInfo, error)); ok {
+ return rf(ctx, param)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, params.InstallWebhookParams) params.HookInfo); ok {
+ r0 = rf(ctx, param)
+ } else {
+ r0 = ret.Get(0).(params.HookInfo)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, params.InstallWebhookParams) error); ok {
+ r1 = rf(ctx, param)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_InstallWebhook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InstallWebhook'
+type PoolManager_InstallWebhook_Call struct {
+ *mock.Call
+}
+
+// InstallWebhook is a helper method to define mock.On call
+// - ctx context.Context
+// - param params.InstallWebhookParams
+func (_e *PoolManager_Expecter) InstallWebhook(ctx interface{}, param interface{}) *PoolManager_InstallWebhook_Call {
+ return &PoolManager_InstallWebhook_Call{Call: _e.mock.On("InstallWebhook", ctx, param)}
+}
+
+func (_c *PoolManager_InstallWebhook_Call) Run(run func(ctx context.Context, param params.InstallWebhookParams)) *PoolManager_InstallWebhook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.InstallWebhookParams))
+ })
+ return _c
+}
+
+func (_c *PoolManager_InstallWebhook_Call) Return(_a0 params.HookInfo, _a1 error) *PoolManager_InstallWebhook_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_InstallWebhook_Call) RunAndReturn(run func(context.Context, params.InstallWebhookParams) (params.HookInfo, error)) *PoolManager_InstallWebhook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RootCABundle provides a mock function with no fields
+func (_m *PoolManager) RootCABundle() (params.CertificateBundle, error) {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for RootCABundle")
+ }
+
+ var r0 params.CertificateBundle
+ var r1 error
+ if rf, ok := ret.Get(0).(func() (params.CertificateBundle, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() params.CertificateBundle); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(params.CertificateBundle)
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PoolManager_RootCABundle_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RootCABundle'
+type PoolManager_RootCABundle_Call struct {
+ *mock.Call
+}
+
+// RootCABundle is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) RootCABundle() *PoolManager_RootCABundle_Call {
+ return &PoolManager_RootCABundle_Call{Call: _e.mock.On("RootCABundle")}
+}
+
+func (_c *PoolManager_RootCABundle_Call) Run(run func()) *PoolManager_RootCABundle_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_RootCABundle_Call) Return(_a0 params.CertificateBundle, _a1 error) *PoolManager_RootCABundle_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManager_RootCABundle_Call) RunAndReturn(run func() (params.CertificateBundle, error)) *PoolManager_RootCABundle_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// SetPoolRunningState provides a mock function with given fields: isRunning, failureReason
+func (_m *PoolManager) SetPoolRunningState(isRunning bool, failureReason string) {
+ _m.Called(isRunning, failureReason)
+}
+
+// PoolManager_SetPoolRunningState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetPoolRunningState'
+type PoolManager_SetPoolRunningState_Call struct {
+ *mock.Call
+}
+
+// SetPoolRunningState is a helper method to define mock.On call
+// - isRunning bool
+// - failureReason string
+func (_e *PoolManager_Expecter) SetPoolRunningState(isRunning interface{}, failureReason interface{}) *PoolManager_SetPoolRunningState_Call {
+ return &PoolManager_SetPoolRunningState_Call{Call: _e.mock.On("SetPoolRunningState", isRunning, failureReason)}
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) Run(run func(isRunning bool, failureReason string)) *PoolManager_SetPoolRunningState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(bool), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) Return() *PoolManager_SetPoolRunningState_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *PoolManager_SetPoolRunningState_Call) RunAndReturn(run func(bool, string)) *PoolManager_SetPoolRunningState_Call {
+ _c.Run(run)
+ return _c
+}
+
+// Start provides a mock function with no fields
func (_m *PoolManager) Start() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Start")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -106,10 +388,41 @@ func (_m *PoolManager) Start() error {
return r0
}
-// Status provides a mock function with given fields:
+// PoolManager_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type PoolManager_Start_Call struct {
+ *mock.Call
+}
+
+// Start is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Start() *PoolManager_Start_Call {
+ return &PoolManager_Start_Call{Call: _e.mock.On("Start")}
+}
+
+func (_c *PoolManager_Start_Call) Run(run func()) *PoolManager_Start_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Start_Call) Return(_a0 error) *PoolManager_Start_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Start_Call) RunAndReturn(run func() error) *PoolManager_Start_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Status provides a mock function with no fields
func (_m *PoolManager) Status() params.PoolManagerStatus {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Status")
+ }
+
var r0 params.PoolManagerStatus
if rf, ok := ret.Get(0).(func() params.PoolManagerStatus); ok {
r0 = rf()
@@ -120,10 +433,41 @@ func (_m *PoolManager) Status() params.PoolManagerStatus {
return r0
}
-// Stop provides a mock function with given fields:
+// PoolManager_Status_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Status'
+type PoolManager_Status_Call struct {
+ *mock.Call
+}
+
+// Status is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Status() *PoolManager_Status_Call {
+ return &PoolManager_Status_Call{Call: _e.mock.On("Status")}
+}
+
+func (_c *PoolManager_Status_Call) Run(run func()) *PoolManager_Status_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Status_Call) Return(_a0 params.PoolManagerStatus) *PoolManager_Status_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Status_Call) RunAndReturn(run func() params.PoolManagerStatus) *PoolManager_Status_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Stop provides a mock function with no fields
func (_m *PoolManager) Stop() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Stop")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -134,10 +478,87 @@ func (_m *PoolManager) Stop() error {
return r0
}
-// Wait provides a mock function with given fields:
+// PoolManager_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'
+type PoolManager_Stop_Call struct {
+ *mock.Call
+}
+
+// Stop is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Stop() *PoolManager_Stop_Call {
+ return &PoolManager_Stop_Call{Call: _e.mock.On("Stop")}
+}
+
+func (_c *PoolManager_Stop_Call) Run(run func()) *PoolManager_Stop_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Stop_Call) Return(_a0 error) *PoolManager_Stop_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Stop_Call) RunAndReturn(run func() error) *PoolManager_Stop_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UninstallWebhook provides a mock function with given fields: ctx
+func (_m *PoolManager) UninstallWebhook(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UninstallWebhook")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// PoolManager_UninstallWebhook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UninstallWebhook'
+type PoolManager_UninstallWebhook_Call struct {
+ *mock.Call
+}
+
+// UninstallWebhook is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *PoolManager_Expecter) UninstallWebhook(ctx interface{}) *PoolManager_UninstallWebhook_Call {
+ return &PoolManager_UninstallWebhook_Call{Call: _e.mock.On("UninstallWebhook", ctx)}
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) Run(run func(ctx context.Context)) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) Return(_a0 error) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_UninstallWebhook_Call) RunAndReturn(run func(context.Context) error) *PoolManager_UninstallWebhook_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Wait provides a mock function with no fields
func (_m *PoolManager) Wait() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Wait")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -148,10 +569,41 @@ func (_m *PoolManager) Wait() error {
return r0
}
-// WebhookSecret provides a mock function with given fields:
+// PoolManager_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait'
+type PoolManager_Wait_Call struct {
+ *mock.Call
+}
+
+// Wait is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) Wait() *PoolManager_Wait_Call {
+ return &PoolManager_Wait_Call{Call: _e.mock.On("Wait")}
+}
+
+func (_c *PoolManager_Wait_Call) Run(run func()) *PoolManager_Wait_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_Wait_Call) Return(_a0 error) *PoolManager_Wait_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_Wait_Call) RunAndReturn(run func() error) *PoolManager_Wait_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// WebhookSecret provides a mock function with no fields
func (_m *PoolManager) WebhookSecret() string {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for WebhookSecret")
+ }
+
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
@@ -162,13 +614,39 @@ func (_m *PoolManager) WebhookSecret() string {
return r0
}
-type mockConstructorTestingTNewPoolManager interface {
- mock.TestingT
- Cleanup(func())
+// PoolManager_WebhookSecret_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WebhookSecret'
+type PoolManager_WebhookSecret_Call struct {
+ *mock.Call
+}
+
+// WebhookSecret is a helper method to define mock.On call
+func (_e *PoolManager_Expecter) WebhookSecret() *PoolManager_WebhookSecret_Call {
+ return &PoolManager_WebhookSecret_Call{Call: _e.mock.On("WebhookSecret")}
+}
+
+func (_c *PoolManager_WebhookSecret_Call) Run(run func()) *PoolManager_WebhookSecret_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManager_WebhookSecret_Call) Return(_a0 string) *PoolManager_WebhookSecret_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManager_WebhookSecret_Call) RunAndReturn(run func() string) *PoolManager_WebhookSecret_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewPoolManager creates a new instance of PoolManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewPoolManager(t mockConstructorTestingTNewPoolManager) *PoolManager {
+// The first argument is typically a *testing.T value.
+func NewPoolManager(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *PoolManager {
mock := &PoolManager{}
mock.Mock.Test(t)
diff --git a/runner/common/mocks/Provider.go b/runner/common/mocks/Provider.go
index fb69eb4e..5bf94a10 100644
--- a/runner/common/mocks/Provider.go
+++ b/runner/common/mocks/Provider.go
@@ -1,12 +1,17 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
import (
context "context"
- params "github.com/cloudbase/garm/params"
+ common "github.com/cloudbase/garm/runner/common"
+
+ garm_provider_commonparams "github.com/cloudbase/garm-provider-common/params"
+
mock "github.com/stretchr/testify/mock"
+
+ params "github.com/cloudbase/garm/params"
)
// Provider is an autogenerated mock type for the Provider type
@@ -14,10 +19,22 @@ type Provider struct {
mock.Mock
}
-// AsParams provides a mock function with given fields:
+type Provider_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *Provider) EXPECT() *Provider_Expecter {
+ return &Provider_Expecter{mock: &_m.Mock}
+}
+
+// AsParams provides a mock function with no fields
func (_m *Provider) AsParams() params.Provider {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for AsParams")
+ }
+
var r0 params.Provider
if rf, ok := ret.Get(0).(func() params.Provider); ok {
r0 = rf()
@@ -28,23 +45,54 @@ func (_m *Provider) AsParams() params.Provider {
return r0
}
-// CreateInstance provides a mock function with given fields: ctx, bootstrapParams
-func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.Instance, error) {
- ret := _m.Called(ctx, bootstrapParams)
+// Provider_AsParams_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsParams'
+type Provider_AsParams_Call struct {
+ *mock.Call
+}
- var r0 params.Instance
+// AsParams is a helper method to define mock.On call
+func (_e *Provider_Expecter) AsParams() *Provider_AsParams_Call {
+ return &Provider_AsParams_Call{Call: _e.mock.On("AsParams")}
+}
+
+func (_c *Provider_AsParams_Call) Run(run func()) *Provider_AsParams_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Provider_AsParams_Call) Return(_a0 params.Provider) *Provider_AsParams_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_AsParams_Call) RunAndReturn(run func() params.Provider) *Provider_AsParams_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateInstance provides a mock function with given fields: ctx, bootstrapParams, createInstanceParams
+func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, bootstrapParams, createInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateInstance")
+ }
+
+ var r0 garm_provider_commonparams.ProviderInstance
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, params.BootstrapInstance) (params.Instance, error)); ok {
- return rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, bootstrapParams, createInstanceParams)
}
- if rf, ok := ret.Get(0).(func(context.Context, params.BootstrapInstance) params.Instance); ok {
- r0 = rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, bootstrapParams, createInstanceParams)
} else {
- r0 = ret.Get(0).(params.Instance)
+ r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
}
- if rf, ok := ret.Get(1).(func(context.Context, params.BootstrapInstance) error); ok {
- r1 = rf(ctx, bootstrapParams)
+ if rf, ok := ret.Get(1).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) error); ok {
+ r1 = rf(ctx, bootstrapParams, createInstanceParams)
} else {
r1 = ret.Error(1)
}
@@ -52,13 +100,47 @@ func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams params.B
return r0, r1
}
-// DeleteInstance provides a mock function with given fields: ctx, instance
-func (_m *Provider) DeleteInstance(ctx context.Context, instance string) error {
- ret := _m.Called(ctx, instance)
+// Provider_CreateInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateInstance'
+type Provider_CreateInstance_Call struct {
+ *mock.Call
+}
+
+// CreateInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - bootstrapParams garm_provider_commonparams.BootstrapInstance
+// - createInstanceParams common.CreateInstanceParams
+func (_e *Provider_Expecter) CreateInstance(ctx interface{}, bootstrapParams interface{}, createInstanceParams interface{}) *Provider_CreateInstance_Call {
+ return &Provider_CreateInstance_Call{Call: _e.mock.On("CreateInstance", ctx, bootstrapParams, createInstanceParams)}
+}
+
+func (_c *Provider_CreateInstance_Call) Run(run func(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams)) *Provider_CreateInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(garm_provider_commonparams.BootstrapInstance), args[2].(common.CreateInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_CreateInstance_Call) Return(_a0 garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_CreateInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_CreateInstance_Call) RunAndReturn(run func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error)) *Provider_CreateInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteInstance provides a mock function with given fields: ctx, instance, deleteInstanceParams
+func (_m *Provider) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
+ ret := _m.Called(ctx, instance, deleteInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteInstance")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
- r0 = rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.DeleteInstanceParams) error); ok {
+ r0 = rf(ctx, instance, deleteInstanceParams)
} else {
r0 = ret.Error(0)
}
@@ -66,23 +148,102 @@ func (_m *Provider) DeleteInstance(ctx context.Context, instance string) error {
return r0
}
-// GetInstance provides a mock function with given fields: ctx, instance
-func (_m *Provider) GetInstance(ctx context.Context, instance string) (params.Instance, error) {
- ret := _m.Called(ctx, instance)
+// Provider_DeleteInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteInstance'
+type Provider_DeleteInstance_Call struct {
+ *mock.Call
+}
- var r0 params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) (params.Instance, error)); ok {
- return rf(ctx, instance)
+// DeleteInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - deleteInstanceParams common.DeleteInstanceParams
+func (_e *Provider_Expecter) DeleteInstance(ctx interface{}, instance interface{}, deleteInstanceParams interface{}) *Provider_DeleteInstance_Call {
+ return &Provider_DeleteInstance_Call{Call: _e.mock.On("DeleteInstance", ctx, instance, deleteInstanceParams)}
+}
+
+func (_c *Provider_DeleteInstance_Call) Run(run func(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams)) *Provider_DeleteInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.DeleteInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_DeleteInstance_Call) Return(_a0 error) *Provider_DeleteInstance_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_DeleteInstance_Call) RunAndReturn(run func(context.Context, string, common.DeleteInstanceParams) error) *Provider_DeleteInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DisableJITConfig provides a mock function with no fields
+func (_m *Provider) DisableJITConfig() bool {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for DisableJITConfig")
}
- if rf, ok := ret.Get(0).(func(context.Context, string) params.Instance); ok {
- r0 = rf(ctx, instance)
+
+ var r0 bool
+ if rf, ok := ret.Get(0).(func() bool); ok {
+ r0 = rf()
} else {
- r0 = ret.Get(0).(params.Instance)
+ r0 = ret.Get(0).(bool)
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, instance)
+ return r0
+}
+
+// Provider_DisableJITConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DisableJITConfig'
+type Provider_DisableJITConfig_Call struct {
+ *mock.Call
+}
+
+// DisableJITConfig is a helper method to define mock.On call
+func (_e *Provider_Expecter) DisableJITConfig() *Provider_DisableJITConfig_Call {
+ return &Provider_DisableJITConfig_Call{Call: _e.mock.On("DisableJITConfig")}
+}
+
+func (_c *Provider_DisableJITConfig_Call) Run(run func()) *Provider_DisableJITConfig_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *Provider_DisableJITConfig_Call) Return(_a0 bool) *Provider_DisableJITConfig_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_DisableJITConfig_Call) RunAndReturn(run func() bool) *Provider_DisableJITConfig_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetInstance provides a mock function with given fields: ctx, instance, getInstanceParams
+func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, instance, getInstanceParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetInstance")
+ }
+
+ var r0 garm_provider_commonparams.ProviderInstance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, instance, getInstanceParams)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, instance, getInstanceParams)
+ } else {
+ r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, common.GetInstanceParams) error); ok {
+ r1 = rf(ctx, instance, getInstanceParams)
} else {
r1 = ret.Error(1)
}
@@ -90,25 +251,59 @@ func (_m *Provider) GetInstance(ctx context.Context, instance string) (params.In
return r0, r1
}
-// ListInstances provides a mock function with given fields: ctx, poolID
-func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
- ret := _m.Called(ctx, poolID)
+// Provider_GetInstance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInstance'
+type Provider_GetInstance_Call struct {
+ *mock.Call
+}
- var r0 []params.Instance
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string) ([]params.Instance, error)); ok {
- return rf(ctx, poolID)
+// GetInstance is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - getInstanceParams common.GetInstanceParams
+func (_e *Provider_Expecter) GetInstance(ctx interface{}, instance interface{}, getInstanceParams interface{}) *Provider_GetInstance_Call {
+ return &Provider_GetInstance_Call{Call: _e.mock.On("GetInstance", ctx, instance, getInstanceParams)}
+}
+
+func (_c *Provider_GetInstance_Call) Run(run func(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams)) *Provider_GetInstance_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.GetInstanceParams))
+ })
+ return _c
+}
+
+func (_c *Provider_GetInstance_Call) Return(_a0 garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_GetInstance_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_GetInstance_Call) RunAndReturn(run func(context.Context, string, common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error)) *Provider_GetInstance_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ListInstances provides a mock function with given fields: ctx, poolID, listInstancesParams
+func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error) {
+ ret := _m.Called(ctx, poolID, listInstancesParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListInstances")
}
- if rf, ok := ret.Get(0).(func(context.Context, string) []params.Instance); ok {
- r0 = rf(ctx, poolID)
+
+ var r0 []garm_provider_commonparams.ProviderInstance
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error)); ok {
+ return rf(ctx, poolID, listInstancesParams)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) []garm_provider_commonparams.ProviderInstance); ok {
+ r0 = rf(ctx, poolID, listInstancesParams)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).([]params.Instance)
+ r0 = ret.Get(0).([]garm_provider_commonparams.ProviderInstance)
}
}
- if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
- r1 = rf(ctx, poolID)
+ if rf, ok := ret.Get(1).(func(context.Context, string, common.ListInstancesParams) error); ok {
+ r1 = rf(ctx, poolID, listInstancesParams)
} else {
r1 = ret.Error(1)
}
@@ -116,13 +311,47 @@ func (_m *Provider) ListInstances(ctx context.Context, poolID string) ([]params.
return r0, r1
}
-// RemoveAllInstances provides a mock function with given fields: ctx
-func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
- ret := _m.Called(ctx)
+// Provider_ListInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListInstances'
+type Provider_ListInstances_Call struct {
+ *mock.Call
+}
+
+// ListInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - poolID string
+// - listInstancesParams common.ListInstancesParams
+func (_e *Provider_Expecter) ListInstances(ctx interface{}, poolID interface{}, listInstancesParams interface{}) *Provider_ListInstances_Call {
+ return &Provider_ListInstances_Call{Call: _e.mock.On("ListInstances", ctx, poolID, listInstancesParams)}
+}
+
+func (_c *Provider_ListInstances_Call) Run(run func(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams)) *Provider_ListInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.ListInstancesParams))
+ })
+ return _c
+}
+
+func (_c *Provider_ListInstances_Call) Return(_a0 []garm_provider_commonparams.ProviderInstance, _a1 error) *Provider_ListInstances_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *Provider_ListInstances_Call) RunAndReturn(run func(context.Context, string, common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error)) *Provider_ListInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveAllInstances provides a mock function with given fields: ctx, removeAllInstancesParams
+func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstancesParams common.RemoveAllInstancesParams) error {
+ ret := _m.Called(ctx, removeAllInstancesParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveAllInstances")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context) error); ok {
- r0 = rf(ctx)
+ if rf, ok := ret.Get(0).(func(context.Context, common.RemoveAllInstancesParams) error); ok {
+ r0 = rf(ctx, removeAllInstancesParams)
} else {
r0 = ret.Error(0)
}
@@ -130,13 +359,46 @@ func (_m *Provider) RemoveAllInstances(ctx context.Context) error {
return r0
}
-// Start provides a mock function with given fields: ctx, instance
-func (_m *Provider) Start(ctx context.Context, instance string) error {
- ret := _m.Called(ctx, instance)
+// Provider_RemoveAllInstances_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveAllInstances'
+type Provider_RemoveAllInstances_Call struct {
+ *mock.Call
+}
+
+// RemoveAllInstances is a helper method to define mock.On call
+// - ctx context.Context
+// - removeAllInstancesParams common.RemoveAllInstancesParams
+func (_e *Provider_Expecter) RemoveAllInstances(ctx interface{}, removeAllInstancesParams interface{}) *Provider_RemoveAllInstances_Call {
+ return &Provider_RemoveAllInstances_Call{Call: _e.mock.On("RemoveAllInstances", ctx, removeAllInstancesParams)}
+}
+
+func (_c *Provider_RemoveAllInstances_Call) Run(run func(ctx context.Context, removeAllInstancesParams common.RemoveAllInstancesParams)) *Provider_RemoveAllInstances_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(common.RemoveAllInstancesParams))
+ })
+ return _c
+}
+
+func (_c *Provider_RemoveAllInstances_Call) Return(_a0 error) *Provider_RemoveAllInstances_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_RemoveAllInstances_Call) RunAndReturn(run func(context.Context, common.RemoveAllInstancesParams) error) *Provider_RemoveAllInstances_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Start provides a mock function with given fields: ctx, instance, startParams
+func (_m *Provider) Start(ctx context.Context, instance string, startParams common.StartParams) error {
+ ret := _m.Called(ctx, instance, startParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Start")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
- r0 = rf(ctx, instance)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.StartParams) error); ok {
+ r0 = rf(ctx, instance, startParams)
} else {
r0 = ret.Error(0)
}
@@ -144,13 +406,47 @@ func (_m *Provider) Start(ctx context.Context, instance string) error {
return r0
}
-// Stop provides a mock function with given fields: ctx, instance, force
-func (_m *Provider) Stop(ctx context.Context, instance string, force bool) error {
- ret := _m.Called(ctx, instance, force)
+// Provider_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
+type Provider_Start_Call struct {
+ *mock.Call
+}
+
+// Start is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - startParams common.StartParams
+func (_e *Provider_Expecter) Start(ctx interface{}, instance interface{}, startParams interface{}) *Provider_Start_Call {
+ return &Provider_Start_Call{Call: _e.mock.On("Start", ctx, instance, startParams)}
+}
+
+func (_c *Provider_Start_Call) Run(run func(ctx context.Context, instance string, startParams common.StartParams)) *Provider_Start_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.StartParams))
+ })
+ return _c
+}
+
+func (_c *Provider_Start_Call) Return(_a0 error) *Provider_Start_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_Start_Call) RunAndReturn(run func(context.Context, string, common.StartParams) error) *Provider_Start_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Stop provides a mock function with given fields: ctx, instance, stopParams
+func (_m *Provider) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
+ ret := _m.Called(ctx, instance, stopParams)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Stop")
+ }
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok {
- r0 = rf(ctx, instance, force)
+ if rf, ok := ret.Get(0).(func(context.Context, string, common.StopParams) error); ok {
+ r0 = rf(ctx, instance, stopParams)
} else {
r0 = ret.Error(0)
}
@@ -158,13 +454,42 @@ func (_m *Provider) Stop(ctx context.Context, instance string, force bool) error
return r0
}
-type mockConstructorTestingTNewProvider interface {
- mock.TestingT
- Cleanup(func())
+// Provider_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'
+type Provider_Stop_Call struct {
+ *mock.Call
+}
+
+// Stop is a helper method to define mock.On call
+// - ctx context.Context
+// - instance string
+// - stopParams common.StopParams
+func (_e *Provider_Expecter) Stop(ctx interface{}, instance interface{}, stopParams interface{}) *Provider_Stop_Call {
+ return &Provider_Stop_Call{Call: _e.mock.On("Stop", ctx, instance, stopParams)}
+}
+
+func (_c *Provider_Stop_Call) Run(run func(ctx context.Context, instance string, stopParams common.StopParams)) *Provider_Stop_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string), args[2].(common.StopParams))
+ })
+ return _c
+}
+
+func (_c *Provider_Stop_Call) Return(_a0 error) *Provider_Stop_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Provider_Stop_Call) RunAndReturn(run func(context.Context, string, common.StopParams) error) *Provider_Stop_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewProvider(t mockConstructorTestingTNewProvider) *Provider {
+// The first argument is typically a *testing.T value.
+func NewProvider(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *Provider {
mock := &Provider{}
mock.Mock.Test(t)
diff --git a/runner/common/mocks/RateLimitClient.go b/runner/common/mocks/RateLimitClient.go
new file mode 100644
index 00000000..b7e52f71
--- /dev/null
+++ b/runner/common/mocks/RateLimitClient.go
@@ -0,0 +1,95 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// RateLimitClient is an autogenerated mock type for the RateLimitClient type
+type RateLimitClient struct {
+ mock.Mock
+}
+
+type RateLimitClient_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *RateLimitClient) EXPECT() *RateLimitClient_Expecter {
+ return &RateLimitClient_Expecter{mock: &_m.Mock}
+}
+
+// RateLimit provides a mock function with given fields: ctx
+func (_m *RateLimitClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RateLimit")
+ }
+
+ var r0 *github.RateLimits
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*github.RateLimits, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *github.RateLimits); ok {
+ r0 = rf(ctx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.RateLimits)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// RateLimitClient_RateLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RateLimit'
+type RateLimitClient_RateLimit_Call struct {
+ *mock.Call
+}
+
+// RateLimit is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *RateLimitClient_Expecter) RateLimit(ctx interface{}) *RateLimitClient_RateLimit_Call {
+ return &RateLimitClient_RateLimit_Call{Call: _e.mock.On("RateLimit", ctx)}
+}
+
+func (_c *RateLimitClient_RateLimit_Call) Run(run func(ctx context.Context)) *RateLimitClient_RateLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *RateLimitClient_RateLimit_Call) Return(_a0 *github.RateLimits, _a1 error) *RateLimitClient_RateLimit_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *RateLimitClient_RateLimit_Call) RunAndReturn(run func(context.Context) (*github.RateLimits, error)) *RateLimitClient_RateLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewRateLimitClient creates a new instance of RateLimitClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRateLimitClient(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *RateLimitClient {
+ mock := &RateLimitClient{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/runner/common/mocks/RepositoryHooks.go b/runner/common/mocks/RepositoryHooks.go
new file mode 100644
index 00000000..3f38915e
--- /dev/null
+++ b/runner/common/mocks/RepositoryHooks.go
@@ -0,0 +1,206 @@
+// Code generated by mockery v2.42.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ context "context"
+
+ github "github.com/google/go-github/v72/github"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// RepositoryHooks is an autogenerated mock type for the RepositoryHooks type
+type RepositoryHooks struct {
+ mock.Mock
+}
+
+// CreateRepoHook provides a mock function with given fields: ctx, owner, repo, hook
+func (_m *RepositoryHooks) CreateRepoHook(ctx context.Context, owner string, repo string, hook *github.Hook) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, hook)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepoHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.Hook) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, hook)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.Hook) *github.Hook); ok {
+ r0 = rf(ctx, owner, repo, hook)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.Hook) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, hook)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.Hook) error); ok {
+ r2 = rf(ctx, owner, repo, hook)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// DeleteRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) DeleteRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepoHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// GetRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) GetRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoHook")
+ }
+
+ var r0 *github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Hook); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, int64) error); ok {
+ r2 = rf(ctx, owner, repo, id)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// ListRepoHooks provides a mock function with given fields: ctx, owner, repo, opts
+func (_m *RepositoryHooks) ListRepoHooks(ctx context.Context, owner string, repo string, opts *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, opts)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ListRepoHooks")
+ }
+
+ var r0 []*github.Hook
+ var r1 *github.Response
+ var r2 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) ([]*github.Hook, *github.Response, error)); ok {
+ return rf(ctx, owner, repo, opts)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) []*github.Hook); ok {
+ r0 = rf(ctx, owner, repo, opts)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*github.Hook)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.ListOptions) *github.Response); ok {
+ r1 = rf(ctx, owner, repo, opts)
+ } else {
+ if ret.Get(1) != nil {
+ r1 = ret.Get(1).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.ListOptions) error); ok {
+ r2 = rf(ctx, owner, repo, opts)
+ } else {
+ r2 = ret.Error(2)
+ }
+
+ return r0, r1, r2
+}
+
+// PingRepoHook provides a mock function with given fields: ctx, owner, repo, id
+func (_m *RepositoryHooks) PingRepoHook(ctx context.Context, owner string, repo string, id int64) (*github.Response, error) {
+ ret := _m.Called(ctx, owner, repo, id)
+
+ if len(ret) == 0 {
+ panic("no return value specified for PingRepoHook")
+ }
+
+ var r0 *github.Response
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*github.Response, error)); ok {
+ return rf(ctx, owner, repo, id)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *github.Response); ok {
+ r0 = rf(ctx, owner, repo, id)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*github.Response)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
+ r1 = rf(ctx, owner, repo, id)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewRepositoryHooks creates a new instance of RepositoryHooks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRepositoryHooks(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *RepositoryHooks {
+ mock := &RepositoryHooks{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/runner/common/params.go b/runner/common/params.go
new file mode 100644
index 00000000..fdf73dbc
--- /dev/null
+++ b/runner/common/params.go
@@ -0,0 +1,88 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import "github.com/cloudbase/garm/params"
+
+// Constants used for the provider interface version.
+const (
+ Version010 = "v0.1.0"
+ Version011 = "v0.1.1"
+)
+
+// Each struct is a wrapper for the actual parameters struct for a specific version.
+// Version 0.1.0 doesn't have any specific parameters, so there is no need for a struct for it.
+type CreateInstanceParams struct {
+ CreateInstanceV011 CreateInstanceV011Params
+}
+
+type DeleteInstanceParams struct {
+ DeleteInstanceV011 DeleteInstanceV011Params
+}
+
+type GetInstanceParams struct {
+ GetInstanceV011 GetInstanceV011Params
+}
+
+type ListInstancesParams struct {
+ ListInstancesV011 ListInstancesV011Params
+}
+
+type RemoveAllInstancesParams struct {
+ RemoveAllInstancesV011 RemoveAllInstancesV011Params
+}
+
+type StopParams struct {
+ StopV011 StopV011Params
+}
+
+type StartParams struct {
+ StartV011 StartV011Params
+}
+
+// Struct for the base provider parameters.
+type ProviderBaseParams struct {
+ PoolInfo params.Pool
+ ControllerInfo params.ControllerInfo
+}
+
+// Structs for version v0.1.1.
+type CreateInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type DeleteInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type GetInstanceV011Params struct {
+ ProviderBaseParams
+}
+
+type ListInstancesV011Params struct {
+ ProviderBaseParams
+}
+
+type RemoveAllInstancesV011Params struct {
+ ProviderBaseParams
+}
+
+type StopV011Params struct {
+ ProviderBaseParams
+}
+
+type StartV011Params struct {
+ ProviderBaseParams
+}
diff --git a/runner/common/pool.go b/runner/common/pool.go
index e4c79c06..4cb86a62 100644
--- a/runner/common/pool.go
+++ b/runner/common/pool.go
@@ -15,6 +15,7 @@
package common
import (
+ "context"
"time"
"github.com/cloudbase/garm/params"
@@ -25,30 +26,55 @@ const (
PoolConsilitationInterval = 5 * time.Second
PoolReapTimeoutInterval = 5 * time.Minute
// Temporary tools download token is valid for 1 hour by default.
- // Set this to 15 minutes. This should allow enough time even on slow
- // clouds for the instance to spin up, download the tools and join gh.
- PoolToolUpdateInterval = 15 * time.Minute
+ // There is no point in making an API call to get available tools for every runner
+ // we spin up. We cache the tools for 5 minutes. This should save us a lot of API calls
+ // in cases where we have a lot of runners spin up at the same time.
+ PoolToolUpdateInterval = 5 * time.Minute
- // UnauthorizedBackoffTimer is the time we wait before making another request
- // after getting an unauthorized error from github. It is unlikely that a second
- // request will not receive the same error, unless the config is changed with new
- // credentials and garm is restarted.
- UnauthorizedBackoffTimer = 3 * time.Hour
+ // BackoffTimer is the time we wait before attempting to make another request
+ // to the github API.
+ BackoffTimer = 1 * time.Minute
)
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type PoolManager interface {
+ // ID returns the ID of the entity (repo, org, enterprise)
ID() string
+ // WebhookSecret returns the unencrypted webhook secret associated with the webhook installed
+ // in GitHub for GARM. For GARM to receive webhook events for an entity, either the operator or
+ // GARM will have to create a webhook in GitHub which points to the GARM API server. To authenticate
+ // the webhook, a webhook secret is used. This function returns that secret.
WebhookSecret() string
+ // GithubRunnerRegistrationToken returns a new registration token for a github runner. This is used
+ // for GHES installations that have not yet upgraded to a version >= 3.10. Starting with 3.10, we use
+ // just-in-time runners, which no longer require exposing a runner registration token.
GithubRunnerRegistrationToken() (string, error)
+ // HandleWorkflowJob handles a workflow job meant for a particular entity. When a webhook is fired for
+ // a repo, org or enterprise, we determine the destination of that webhook, retrieve the pool manager
+ // for it and call this function with the WorkflowJob as a parameter.
HandleWorkflowJob(job params.WorkflowJob) error
- RefreshState(param params.UpdatePoolStateParams) error
- ForceDeleteRunner(runner params.Instance) error
- // AddPool(ctx context.Context, pool params.Pool) error
- // PoolManager lifecycle functions. Start/stop pool.
+ // InstallWebhook will create a webhook in github for the entity associated with this pool manager.
+ InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error)
+ // GetWebhookInfo will return information about the webhook installed in github for the entity associated with this pool manager.
+ GetWebhookInfo(ctx context.Context) (params.HookInfo, error)
+ // UninstallWebhook will remove the webhook installed in github for the entity associated with this pool manager.
+ UninstallWebhook(ctx context.Context) error
+
+ // RootCABundle will return a CA bundle that must be installed on all runners in order to properly validate
+ // x509 certificates used by various systems involved. This CA bundle is defined in the GARM config file and
+ // can include multiple CA certificates for the GARM api server, GHES server and any provider API endpoint that
+ // may use internal or self signed certificates.
+ RootCABundle() (params.CertificateBundle, error)
+
+ SetPoolRunningState(isRunning bool, failureReason string)
+
+ // Start will start the pool manager and all associated workers.
Start() error
+ // Stop will stop the pool manager and all associated workers.
Stop() error
+ // Status will return the current status of the pool manager.
Status() params.PoolManagerStatus
+ // Wait will block until the pool manager has stopped.
Wait() error
}
diff --git a/runner/common/provider.go b/runner/common/provider.go
index 2def7385..a5d0db66 100644
--- a/runner/common/provider.go
+++ b/runner/common/provider.go
@@ -17,25 +17,30 @@ package common
import (
"context"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/params"
)
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type Provider interface {
// CreateInstance creates a new compute instance in the provider.
- CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.Instance, error)
+ CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, createInstanceParams CreateInstanceParams) (commonParams.ProviderInstance, error)
// Delete instance will delete the instance in a provider.
- DeleteInstance(ctx context.Context, instance string) error
+ DeleteInstance(ctx context.Context, instance string, deleteInstanceParams DeleteInstanceParams) error
// GetInstance will return details about one instance.
- GetInstance(ctx context.Context, instance string) (params.Instance, error)
+ GetInstance(ctx context.Context, instance string, getInstanceParams GetInstanceParams) (commonParams.ProviderInstance, error)
// ListInstances will list all instances for a provider.
- ListInstances(ctx context.Context, poolID string) ([]params.Instance, error)
+ ListInstances(ctx context.Context, poolID string, listInstancesParams ListInstancesParams) ([]commonParams.ProviderInstance, error)
// RemoveAllInstances will remove all instances created by this provider.
- RemoveAllInstances(ctx context.Context) error
+ RemoveAllInstances(ctx context.Context, removeAllInstancesParams RemoveAllInstancesParams) error
// Stop shuts down the instance.
- Stop(ctx context.Context, instance string, force bool) error
+ Stop(ctx context.Context, instance string, stopParams StopParams) error
// Start boots up an instance.
- Start(ctx context.Context, instance string) error
+ Start(ctx context.Context, instance string, startParams StartParams) error
+ // DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+ // forces runner registration tokens to be used. This may happen if a provider has not yet
+ // been updated to support JIT configuration.
+ DisableJITConfig() bool
AsParams() params.Provider
}
diff --git a/runner/common/util.go b/runner/common/util.go
index 554da767..5130dcfd 100644
--- a/runner/common/util.go
+++ b/runner/common/util.go
@@ -1,47 +1,59 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package common
import (
"context"
+ "net/url"
- "github.com/google/go-github/v48/github"
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/params"
)
+type GithubEntityOperations interface {
+ ListEntityHooks(ctx context.Context, opts *github.ListOptions) (ret []*github.Hook, response *github.Response, err error)
+ GetEntityHook(ctx context.Context, id int64) (ret *github.Hook, err error)
+ CreateEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error)
+ DeleteEntityHook(ctx context.Context, id int64) (ret *github.Response, err error)
+ PingEntityHook(ctx context.Context, id int64) (ret *github.Response, err error)
+ ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error)
+ ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error)
+ RemoveEntityRunner(ctx context.Context, runnerID int64) error
+ RateLimit(ctx context.Context) (*github.RateLimits, error)
+ CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error)
+ GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (jitConfigMap map[string]string, runner *github.Runner, err error)
+ GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error)
+
+ // GetEntity returns the GitHub entity for which the github client was instantiated.
+ GetEntity() params.ForgeEntity
+ // GithubBaseURL returns the base URL for the github or GHES API.
+ GithubBaseURL() *url.URL
+}
+
+type RateLimitClient interface {
+ RateLimit(ctx context.Context) (*github.RateLimits, error)
+}
+
// GithubClient that describes the minimum list of functions we need to interact with github.
// Allows for easier testing.
//
-//go:generate mockery --all
+//go:generate go run github.com/vektra/mockery/v2@latest
type GithubClient interface {
+ GithubEntityOperations
+
// GetWorkflowJobByID gets details about a single workflow job.
GetWorkflowJobByID(ctx context.Context, owner, repo string, jobID int64) (*github.WorkflowJob, *github.Response, error)
- // ListRunners lists all runners within a repository.
- ListRunners(ctx context.Context, owner, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // ListRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListRunnerApplicationDownloads(ctx context.Context, owner, repo string) ([]*github.RunnerApplicationDownload, *github.Response, error)
- // RemoveRunner removes one runner from a repository.
- RemoveRunner(ctx context.Context, owner, repo string, runnerID int64) (*github.Response, error)
- // CreateRegistrationToken creates a runner registration token for one repository.
- CreateRegistrationToken(ctx context.Context, owner, repo string) (*github.RegistrationToken, *github.Response, error)
-
- // ListOrganizationRunners lists all runners within an organization.
- ListOrganizationRunners(ctx context.Context, owner string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // ListOrganizationRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*github.RunnerApplicationDownload, *github.Response, error)
- // RemoveOrganizationRunner removes one github runner from an organization.
- RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*github.Response, error)
- // CreateOrganizationRegistrationToken creates a runner registration token for an organization.
- CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*github.RegistrationToken, *github.Response, error)
-}
-
-type GithubEnterpriseClient interface {
- // ListRunners lists all runners within a repository.
- ListRunners(ctx context.Context, enterprise string, opts *github.ListOptions) (*github.Runners, *github.Response, error)
- // RemoveRunner removes one runner from an enterprise.
- RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*github.Response, error)
- // CreateRegistrationToken creates a runner registration token for an enterprise.
- CreateRegistrationToken(ctx context.Context, enterprise string) (*github.RegistrationToken, *github.Response, error)
- // ListRunnerApplicationDownloads returns a list of github runner application downloads for the
- // various supported operating systems and architectures.
- ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*github.RunnerApplicationDownload, *github.Response, error)
}
diff --git a/runner/common_test.go b/runner/common_test.go
new file mode 100644
index 00000000..247b5ab1
--- /dev/null
+++ b/runner/common_test.go
@@ -0,0 +1,23 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+const (
+ // nolint: gosec
+ notExistingCredentialsName = "not-existent-creds-name"
+ // nolint: gosec
+ invalidCredentialsName = "invalid-creds-name"
+ notExistingProviderName = "not-existent-provider-name"
+)
diff --git a/runner/enterprises.go b/runner/enterprises.go
index 6c4421d8..6b393abd 100644
--- a/runner/enterprises.go
+++ b/runner/enterprises.go
@@ -1,18 +1,31 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateEnterprise(ctx context.Context, param params.CreateEnterpriseParams) (enterprise params.Enterprise, err error) {
@@ -22,58 +35,64 @@ func (r *Runner) CreateEnterprise(ctx context.Context, param params.CreateEnterp
err = param.Validate()
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "validating params")
+ return params.Enterprise{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
+ creds, err := r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ if err != nil {
return params.Enterprise{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetEnterprise(ctx, param.Name)
+ _, err = r.store.GetEnterprise(ctx, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
} else {
return params.Enterprise{}, runnerErrors.NewConflictError("enterprise %s already exists", param.Name)
}
- enterprise, err = r.store.CreateEnterprise(ctx, param.Name, creds.Name, param.WebhookSecret)
+ enterprise, err = r.store.CreateEnterprise(ctx, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteEnterprise(ctx, enterprise.ID); deleteErr != nil {
- log.Printf("failed to delete enterprise: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete enterprise",
+ "enterprise_id", enterprise.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
var poolMgr common.PoolManager
poolMgr, err = r.poolManagerCtrl.CreateEnterprisePoolManager(r.ctx, enterprise, r.providers, r.store)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise pool manager")
+ return params.Enterprise{}, fmt.Errorf("error creating enterprise pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteEnterprisePoolManager(enterprise); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for enterprise %s", enterprise.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for enterprise",
+ "enterprise_id", enterprise.ID)
}
- return params.Enterprise{}, errors.Wrap(err, "starting enterprise pool manager")
+ return params.Enterprise{}, fmt.Errorf("error starting enterprise pool manager: %w", err)
}
return enterprise, nil
}
-func (r *Runner) ListEnterprises(ctx context.Context) ([]params.Enterprise, error) {
+func (r *Runner) ListEnterprises(ctx context.Context, filter params.EnterpriseFilter) ([]params.Enterprise, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- enterprises, err := r.store.ListEnterprises(ctx)
+ enterprises, err := r.store.ListEnterprises(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing enterprises")
+ return nil, fmt.Errorf("error listing enterprises: %w", err)
}
var allEnterprises []params.Enterprise
@@ -99,7 +118,7 @@ func (r *Runner) GetEnterpriseByID(ctx context.Context, enterpriseID string) (pa
enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ return params.Enterprise{}, fmt.Errorf("error fetching enterprise: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
@@ -117,34 +136,48 @@ func (r *Runner) DeleteEnterprise(ctx context.Context, enterpriseID string) erro
enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
if err != nil {
- return errors.Wrap(err, "fetching enterprise")
+ return fmt.Errorf("error fetching enterprise: %w", err)
}
- pools, err := r.store.ListEnterprisePools(ctx, enterpriseID)
+ entity, err := enterprise.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching enterprise pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("enterprise has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("enterprise has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching enterprise scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("enterprise has scale sets defined; delete them first")
}
if err := r.poolManagerCtrl.DeleteEnterprisePoolManager(enterprise); err != nil {
- return errors.Wrap(err, "deleting enterprise pool manager")
+ return fmt.Errorf("error deleting enterprise pool manager: %w", err)
}
if err := r.store.DeleteEnterprise(ctx, enterpriseID); err != nil {
- return errors.Wrapf(err, "removing enterprise %s", enterpriseID)
+ return fmt.Errorf("error removing enterprise %s: %w", enterpriseID, err)
}
return nil
}
-func (r *Runner) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateRepositoryParams) (params.Enterprise, error) {
+func (r *Runner) UpdateEnterprise(ctx context.Context, enterpriseID string, param params.UpdateEntityParams) (params.Enterprise, error) {
if !auth.IsAdmin(ctx) {
return params.Enterprise{}, runnerErrors.ErrUnauthorized
}
@@ -152,38 +185,23 @@ func (r *Runner) UpdateEnterprise(ctx context.Context, enterpriseID string, para
r.mux.Lock()
defer r.mux.Unlock()
- enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "fetching enterprise")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Enterprise{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Enterprise{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for enterprise %s", param.CredentialsName, enterprise.Name)
- }
- }
-
- enterprise, err = r.store.UpdateEnterprise(ctx, enterpriseID, param)
+ enterprise, err := r.store.UpdateEnterprise(ctx, enterpriseID, param)
if err != nil {
- return params.Enterprise{}, errors.Wrap(err, "updating enterprise")
+ return params.Enterprise{}, fmt.Errorf("error updating enterprise: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
- newState := params.UpdatePoolStateParams{
- WebhookSecret: enterprise.WebhookSecret,
- }
- // stop the pool mgr
- if err := poolMgr.RefreshState(newState); err != nil {
- return params.Enterprise{}, errors.Wrap(err, "updating enterprise pool manager")
- }
- } else {
- if _, err := r.poolManagerCtrl.CreateEnterprisePoolManager(r.ctx, enterprise, r.providers, r.store); err != nil {
- return params.Enterprise{}, errors.Wrap(err, "creating enterprise pool manager")
- }
+ return params.Enterprise{}, fmt.Errorf("failed to get enterprise pool manager: %w", err)
}
+ enterprise.PoolManagerStatus = poolMgr.Status()
return enterprise, nil
}
@@ -192,30 +210,23 @@ func (r *Runner) CreateEnterprisePool(ctx context.Context, enterpriseID string,
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- enterprise, err := r.store.GetEnterpriseByID(ctx, enterpriseID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching enterprise")
- }
-
- if _, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("failed to append tags to create pool params: %w", err)
}
if param.RunnerBootstrapTimeout == 0 {
param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateEnterprisePool(ctx, enterpriseID, createPoolParams)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("failed to create enterprise pool: %w", err)
}
return pool, nil
@@ -225,10 +236,13 @@ func (r *Runner) GetEnterprisePoolByID(ctx context.Context, enterpriseID, poolID
if !auth.IsAdmin(ctx) {
return params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
@@ -238,28 +252,28 @@ func (r *Runner) DeleteEnterprisePool(ctx context.Context, enterpriseID, poolID
return runnerErrors.ErrUnauthorized
}
- // TODO: dedup instance count verification
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
- if err != nil {
- return errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
}
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching instances")
+ return fmt.Errorf("error fetching pool: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteEnterprisePool(ctx, enterpriseID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -269,9 +283,13 @@ func (r *Runner) ListEnterprisePools(ctx context.Context, enterpriseID string) (
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
- pools, err := r.store.ListEnterprisePools(ctx, enterpriseID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -281,9 +299,13 @@ func (r *Runner) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetEnterprisePool(ctx, enterpriseID, poolID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -300,9 +322,9 @@ func (r *Runner) UpdateEnterprisePool(ctx context.Context, enterpriseID, poolID
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateEnterprisePool(ctx, enterpriseID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -311,26 +333,29 @@ func (r *Runner) ListEnterpriseInstances(ctx context.Context, enterpriseID strin
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
-
- instances, err := r.store.ListEnterpriseInstances(ctx, enterpriseID)
+ entity := params.ForgeEntity{
+ ID: enterpriseID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findEnterprisePoolManager(name string) (common.PoolManager, error) {
+func (r *Runner) findEnterprisePoolManager(name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- enterprise, err := r.store.GetEnterprise(r.ctx, name)
+ enterprise, err := r.store.GetEnterprise(r.ctx, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
+ return nil, fmt.Errorf("error fetching enterprise: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetEnterprisePoolManager(enterprise)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for enterprise")
+ return nil, fmt.Errorf("error fetching pool manager for enterprise: %w", err)
}
return poolManager, nil
}
diff --git a/runner/enterprises_test.go b/runner/enterprises_test.go
index 40f79f9f..0724ccf9 100644
--- a/runner/enterprises_test.go
+++ b/runner/enterprises_test.go
@@ -16,22 +16,21 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- garmTesting "github.com/cloudbase/garm/internal/testing"
+ garmTesting "github.com/cloudbase/garm/internal/testing" //nolint:typecheck
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type EnterpriseTestFixtures struct {
@@ -40,13 +39,12 @@ type EnterpriseTestFixtures struct {
Store dbCommon.Store
StoreEnterprises map[string]params.Enterprise
Providers map[string]common.Provider
- Credentials map[string]config.Github
+ Credentials map[string]params.ForgeCredentials
CreateEnterpriseParams params.CreateEnterpriseParams
CreatePoolParams params.CreatePoolParams
CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
+ UpdateRepoParams params.UpdateEntityParams
UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
ErrMock error
ProviderMock *runnerCommonMocks.Provider
PoolMgrMock *runnerCommonMocks.PoolManager
@@ -57,18 +55,29 @@ type EnterpriseTestSuite struct {
suite.Suite
Fixtures *EnterpriseTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ forgeEndpoint params.ForgeEndpoint
+ ghesEndpoint params.ForgeEndpoint
+ ghesCreds params.ForgeCredentials
}
func (s *EnterpriseTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.forgeEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.ghesEndpoint = garmTesting.CreateGHESEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.forgeEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.forgeEndpoint)
+ s.ghesCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "ghes-creds", db, s.T(), s.ghesEndpoint)
+
// create some organization objects in the database, for testing purposes
enterprises := map[string]params.Enterprise{}
for i := 1; i <= 3; i++ {
@@ -76,11 +85,12 @@ func (s *EnterpriseTestSuite) SetupTest() {
enterprise, err := db.CreateEnterprise(
adminCtx,
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%v)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-enterprise-%v): %+v", i, err))
}
enterprises[name] = enterprise
}
@@ -97,16 +107,13 @@ func (s *EnterpriseTestSuite) SetupTest() {
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateEnterpriseParams: params.CreateEnterpriseParams{
Name: "test-enterprise-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-enterprise-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -117,15 +124,15 @@ func (s *EnterpriseTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +141,6 @@ func (s *EnterpriseTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +151,6 @@ func (s *EnterpriseTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -163,12 +166,13 @@ func (s *EnterpriseTestSuite) TestCreateEnterprise() {
// call tested function
enterprise, err := s.Runner.CreateEnterprise(s.Fixtures.AdminContext, s.Fixtures.CreateEnterpriseParams)
+ s.Require().Nil(err)
+ s.Require().Equal(s.Fixtures.CreateEnterpriseParams.Name, enterprise.Name)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateEnterpriseParams.CredentialsName].Name, enterprise.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, enterprise.PoolBalancerType)
// assertions
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.CreateEnterpriseParams.Name, enterprise.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateEnterpriseParams.CredentialsName].Name, enterprise.CredentialsName)
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseErrUnauthorized() {
@@ -184,7 +188,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseEmptyParams() {
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseMissingCredentials() {
- s.Fixtures.CreateEnterpriseParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateEnterpriseParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateEnterprise(s.Fixtures.AdminContext, s.Fixtures.CreateEnterpriseParams)
@@ -206,7 +210,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterpriseStartPoolMgrFailed() {
@@ -218,20 +222,80 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestListEnterprises() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- orgs, err := s.Runner.ListEnterprises(s.Fixtures.AdminContext)
+ orgs, err := s.Runner.ListEnterprises(s.Fixtures.AdminContext, params.EnterpriseFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreEnterprises), orgs)
}
+func (s *EnterpriseTestSuite) TestListEnterprisesWithFilters() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ enterprise, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprise2, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise2",
+ s.testCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ enterprise3, err := s.Fixtures.Store.CreateEnterprise(
+ s.Fixtures.AdminContext,
+ "test-enterprise",
+ s.ghesCreds,
+ "super secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ s.Require().NoError(err)
+ orgs, err := s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise, enterprise3}, orgs)
+
+ orgs, err = s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise",
+ Endpoint: s.ghesEndpoint.Name,
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise3}, orgs)
+
+ orgs, err = s.Runner.ListEnterprises(
+ s.Fixtures.AdminContext,
+ params.EnterpriseFilter{
+ Name: "test-enterprise2",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Enterprise{enterprise2}, orgs)
+}
+
func (s *EnterpriseTestSuite) TestListEnterprisesErrUnauthorized() {
- _, err := s.Runner.ListEnterprises(context.Background())
+ _, err := s.Runner.ListEnterprises(context.Background(), params.EnterpriseFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -260,7 +324,7 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprise() {
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetEnterpriseByID(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-3"].ID)
- s.Require().Equal("fetching enterprise: not found", err.Error())
+ s.Require().Equal("error fetching enterprise: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterpriseErrUnauthorized() {
@@ -270,7 +334,11 @@ func (s *EnterpriseTestSuite) TestDeleteEnterpriseErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store enterprises pool: %v", err))
}
@@ -286,20 +354,23 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolMgrFailed() {
err := s.Runner.DeleteEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterprise() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateEnterprisePoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Enterprise"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- org, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
+ param := s.Fixtures.UpdateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ org, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, param)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseErrUnauthorized() {
@@ -309,38 +380,34 @@ func (s *EnterpriseTestSuite) TestUpdateEnterpriseErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for enterprise %s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreEnterprises["test-enterprise-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- s.Fixtures.PoolMgrMock.On("RefreshState", s.Fixtures.UpdatePoolStateParams).Return(s.Fixtures.ErrMock)
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("updating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestUpdateEnterpriseCreateEnterprisePoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateEnterprisePoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Enterprise"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateEnterprise(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get enterprise pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *EnterpriseTestSuite) TestCreateEnterprisePool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -364,30 +431,21 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *EnterpriseTestSuite) TestCreateEnterprisePoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, runnerErrors.ErrNotFound)
-
- _, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *EnterpriseTestSuite) TestCreateEnterprisePoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Regexp("fetching pool params: no such provider", err.Error())
+ s.Require().Regexp("failed to append tags to create pool params: no such provider not-existent-provider-name", err.Error())
}
func (s *EnterpriseTestSuite) TestGetEnterprisePoolByID() {
- enterprisePool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ enterprisePool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -405,7 +463,11 @@ func (s *EnterpriseTestSuite) TestGetEnterprisePoolByIDErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -414,8 +476,8 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolErrUnauthorized() {
@@ -425,7 +487,11 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
@@ -440,10 +506,14 @@ func (s *EnterpriseTestSuite) TestDeleteEnterprisePoolRunnersFailed() {
}
func (s *EnterpriseTestSuite) TestListEnterprisePools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
enterprisePools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-enterprise-%v", i)
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -463,7 +533,11 @@ func (s *EnterpriseTestSuite) TestListOrgPoolsErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePool() {
- enterprisePool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ enterprisePool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -482,7 +556,11 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolErrUnauthorized() {
}
func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %s", err))
}
@@ -497,7 +575,11 @@ func (s *EnterpriseTestSuite) TestUpdateEnterprisePoolMinIdleGreaterThanMax() {
}
func (s *EnterpriseTestSuite) TestListEnterpriseInstances() {
- pool, err := s.Fixtures.Store.CreateEnterprisePool(s.Fixtures.AdminContext, s.Fixtures.StoreEnterprises["test-enterprise-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreEnterprises["test-enterprise-1"].ID,
+ EntityType: params.ForgeEntityTypeEnterprise,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create enterprise pool: %v", err))
}
@@ -526,7 +608,7 @@ func (s *EnterpriseTestSuite) TestListEnterpriseInstancesErrUnauthorized() {
func (s *EnterpriseTestSuite) TestFindEnterprisePoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name)
+ poolManager, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name, s.Fixtures.StoreEnterprises["test-enterprise-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -537,7 +619,7 @@ func (s *EnterpriseTestSuite) TestFindEnterprisePoolManager() {
func (s *EnterpriseTestSuite) TestFindEnterprisePoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetEnterprisePoolManager", mock.AnythingOfType("params.Enterprise")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name)
+ _, err := s.Runner.findEnterprisePoolManager(s.Fixtures.StoreEnterprises["test-enterprise-1"].Name, s.Fixtures.StoreEnterprises["test-enterprise-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/gitea_credentials.go b/runner/gitea_credentials.go
new file mode 100644
index 00000000..d66212f9
--- /dev/null
+++ b/runner/gitea_credentials.go
@@ -0,0 +1,99 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ListGiteaCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ // Get the credentials from the store. The cache is always updated after the database successfully
+ // commits the transaction that created/updated the credentials.
+ // If we create a set of credentials then immediately after we call ListGiteaCredentials,
+ // there is a posibillity that not all creds will be in the cache.
+ creds, err := r.store.ListGiteaCredentials(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching gitea credentials: %w", err)
+ }
+ return creds, nil
+}
+
+func (r *Runner) CreateGiteaCredentials(ctx context.Context, param params.CreateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to validate gitea credentials params: %w", err)
+ }
+
+ creds, err := r.store.CreateGiteaCredentials(ctx, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to create gitea credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) GetGiteaCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ creds, err := r.store.GetGiteaCredentials(ctx, id, true)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to get gitea credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) DeleteGiteaCredentials(ctx context.Context, id uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ if err := r.store.DeleteGiteaCredentials(ctx, id); err != nil {
+ return fmt.Errorf("error failed to delete gitea credentials: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGiteaCredentials(ctx context.Context, id uint, param params.UpdateGiteaCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to validate gitea credentials params: %w", err)
+ }
+
+ newCreds, err := r.store.UpdateGiteaCredentials(ctx, id, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("error failed to update gitea credentials: %w", err)
+ }
+
+ return newCreds, nil
+}
diff --git a/runner/gitea_endpoints.go b/runner/gitea_endpoints.go
new file mode 100644
index 00000000..4a7e32d9
--- /dev/null
+++ b/runner/gitea_endpoints.go
@@ -0,0 +1,95 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) CreateGiteaEndpoint(ctx context.Context, param params.CreateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate gitea endpoint params: %w", err)
+ }
+
+ ep, err := r.store.CreateGiteaEndpoint(ctx, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to create gitea endpoint: %w", err)
+ }
+
+ return ep, nil
+}
+
+func (r *Runner) GetGiteaEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+ endpoint, err := r.store.GetGiteaEndpoint(ctx, name)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to get gitea endpoint: %w", err)
+ }
+
+ return endpoint, nil
+}
+
+func (r *Runner) DeleteGiteaEndpoint(ctx context.Context, name string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ err := r.store.DeleteGiteaEndpoint(ctx, name)
+ if err != nil {
+ return fmt.Errorf("failed to delete gitea endpoint: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGiteaEndpoint(ctx context.Context, name string, param params.UpdateGiteaEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate gitea endpoint params: %w", err)
+ }
+
+ newEp, err := r.store.UpdateGiteaEndpoint(ctx, name, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to update gitea endpoint: %w", err)
+ }
+ return newEp, nil
+}
+
+func (r *Runner) ListGiteaEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ endpoints, err := r.store.ListGiteaEndpoints(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list gitea endpoints: %w", err)
+ }
+
+ return endpoints, nil
+}
diff --git a/runner/github_credentials.go b/runner/github_credentials.go
new file mode 100644
index 00000000..5e1291ff
--- /dev/null
+++ b/runner/github_credentials.go
@@ -0,0 +1,115 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/cache"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) ListCredentials(ctx context.Context) ([]params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ // Get the credentials from the store. The cache is always updated after the database successfully
+ // commits the transaction that created/updated the credentials.
+ // If we create a set of credentials then immediately after we call ListCredentials,
+ // there is a posibillity that not all creds will be in the cache.
+ creds, err := r.store.ListGithubCredentials(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching github credentials: %w", err)
+ }
+
+ // If we do have cache, update the rate limit for each credential. The rate limits are queried
+ // every 30 seconds and set in cache.
+ credsCache := cache.GetAllGithubCredentialsAsMap()
+ for idx, cred := range creds {
+ inCache, ok := credsCache[cred.ID]
+ if ok {
+ creds[idx].RateLimit = inCache.RateLimit
+ }
+ }
+ return creds, nil
+}
+
+func (r *Runner) CreateGithubCredentials(ctx context.Context, param params.CreateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to validate github credentials params: %w", err)
+ }
+
+ creds, err := r.store.CreateGithubCredentials(ctx, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to create github credentials: %w", err)
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) GetGithubCredentials(ctx context.Context, id uint) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ creds, err := r.store.GetGithubCredentials(ctx, id, true)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to get github credentials: %w", err)
+ }
+
+ cached, ok := cache.GetGithubCredentials((creds.ID))
+ if ok {
+ creds.RateLimit = cached.RateLimit
+ }
+
+ return creds, nil
+}
+
+func (r *Runner) DeleteGithubCredentials(ctx context.Context, id uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ if err := r.store.DeleteGithubCredentials(ctx, id); err != nil {
+ return fmt.Errorf("failed to delete github credentials: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGithubCredentials(ctx context.Context, id uint, param params.UpdateGithubCredentialsParams) (params.ForgeCredentials, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeCredentials{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to validate github credentials params: %w", err)
+ }
+
+ newCreds, err := r.store.UpdateGithubCredentials(ctx, id, param)
+ if err != nil {
+ return params.ForgeCredentials{}, fmt.Errorf("failed to update github credentials: %w", err)
+ }
+
+ return newCreds, nil
+}
diff --git a/runner/github_endpoints.go b/runner/github_endpoints.go
new file mode 100644
index 00000000..29965081
--- /dev/null
+++ b/runner/github_endpoints.go
@@ -0,0 +1,95 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "fmt"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+func (r *Runner) CreateGithubEndpoint(ctx context.Context, param params.CreateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error failed to validate github endpoint params: %w", err)
+ }
+
+ ep, err := r.store.CreateGithubEndpoint(ctx, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to create github endpoint: %w", err)
+ }
+
+ return ep, nil
+}
+
+func (r *Runner) GetGithubEndpoint(ctx context.Context, name string) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+ endpoint, err := r.store.GetGithubEndpoint(ctx, name)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to get github endpoint: %w", err)
+ }
+
+ return endpoint, nil
+}
+
+func (r *Runner) DeleteGithubEndpoint(ctx context.Context, name string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ err := r.store.DeleteGithubEndpoint(ctx, name)
+ if err != nil {
+ return fmt.Errorf("failed to delete github endpoint: %w", err)
+ }
+
+ return nil
+}
+
+func (r *Runner) UpdateGithubEndpoint(ctx context.Context, name string, param params.UpdateGithubEndpointParams) (params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ForgeEndpoint{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to validate github endpoint params: %w", err)
+ }
+
+ newEp, err := r.store.UpdateGithubEndpoint(ctx, name, param)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("failed to update github endpoint: %w", err)
+ }
+ return newEp, nil
+}
+
+func (r *Runner) ListGithubEndpoints(ctx context.Context) ([]params.ForgeEndpoint, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ endpoints, err := r.store.ListGithubEndpoints(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list github endpoints: %w", err)
+ }
+
+ return endpoints, nil
+}
diff --git a/runner/interfaces.go b/runner/interfaces.go
index ff8129ed..3d4703f7 100644
--- a/runner/interfaces.go
+++ b/runner/interfaces.go
@@ -43,7 +43,7 @@ type EnterprisePoolManager interface {
GetEnterprisePoolManagers() (map[string]common.PoolManager, error)
}
-//go:generate mockery --name=PoolManagerController
+//go:generate go run github.com/vektra/mockery/v2@latest
type PoolManagerController interface {
RepoPoolManager
diff --git a/runner/metadata.go b/runner/metadata.go
new file mode 100644
index 00000000..b309b96e
--- /dev/null
+++ b/runner/metadata.go
@@ -0,0 +1,300 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "html/template"
+ "log/slog"
+
+ "github.com/cloudbase/garm-provider-common/defaults"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+)
+
+// githubSystemdUnitTemplate is the systemd unit rendered for runners on
+// GitHub endpoints. Template data is {ServiceName, RunAsUser}; see
+// GenerateSystemdUnitFile, which selects and executes this template.
+var githubSystemdUnitTemplate = `[Unit]
+Description=GitHub Actions Runner ({{.ServiceName}})
+After=network.target
+
+[Service]
+ExecStart=/home/{{.RunAsUser}}/actions-runner/runsvc.sh
+User={{.RunAsUser}}
+WorkingDirectory=/home/{{.RunAsUser}}/actions-runner
+KillMode=process
+KillSignal=SIGTERM
+TimeoutStopSec=5min
+
+[Install]
+WantedBy=multi-user.target
+`
+
+// giteaSystemdUnitTemplate is the systemd unit rendered for act_runner on
+// Gitea endpoints. act_runner is started with "daemon --once" and the unit
+// sets Restart=always, so systemd re-launches it after each exit. Template
+// data is {ServiceName, RunAsUser}; see GenerateSystemdUnitFile.
+var giteaSystemdUnitTemplate = `[Unit]
+Description=Act Runner ({{.ServiceName}})
+After=network.target
+
+[Service]
+ExecStart=/home/{{.RunAsUser}}/act-runner/act_runner daemon --once
+User={{.RunAsUser}}
+WorkingDirectory=/home/{{.RunAsUser}}/act-runner
+KillMode=process
+KillSignal=SIGTERM
+TimeoutStopSec=5min
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+`
+
+// validateInstanceState authorizes a metadata request made by a runner
+// instance: only instances whose runner is still in the pending or
+// installing phase may call the metadata endpoints. On success it returns
+// the instance parameters taken from the request context.
+func validateInstanceState(ctx context.Context) (params.Instance, error) {
+    switch auth.InstanceRunnerStatus(ctx) {
+    case params.RunnerPending, params.RunnerInstalling:
+        // allowed states; fall through to fetching the instance params
+    default:
+        return params.Instance{}, runnerErrors.ErrUnauthorized
+    }
+
+    instance, err := auth.InstanceParams(ctx)
+    if err != nil {
+        return params.Instance{}, runnerErrors.ErrUnauthorized
+    }
+    return instance, nil
+}
+
+// getForgeEntityFromInstance resolves the forge entity (repository,
+// organization or enterprise) that the given instance ultimately belongs
+// to, by first looking up the pool or scale set that created it.
+func (r *Runner) getForgeEntityFromInstance(ctx context.Context, instance params.Instance) (params.ForgeEntity, error) {
+ var entityGetter params.EntityGetter
+ var err error
+ // An instance originates from either a pool or a scale set; exactly one
+ // of the two IDs is expected to be set.
+ switch {
+ case instance.PoolID != "":
+ entityGetter, err = r.store.GetPoolByID(r.ctx, instance.PoolID)
+ case instance.ScaleSetID != 0:
+ entityGetter, err = r.store.GetScaleSetByID(r.ctx, instance.ScaleSetID)
+ default:
+ return params.ForgeEntity{}, errors.New("instance not associated with a pool or scale set")
+ }
+
+ // NOTE(review): store lookups use r.ctx (runner lifetime) while logging
+ // uses the request ctx — presumably intentional; confirm.
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity getter",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity getter: %w", err)
+ }
+
+ poolEntity, err := entityGetter.GetEntity()
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ // Fetch the full entity record from the store using the type/ID pair
+ // obtained above (callers rely on fields such as entity.Credentials).
+ entity, err := r.store.GetForgeEntity(r.ctx, poolEntity.EntityType, poolEntity.ID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get entity",
+ "instance", instance.Name)
+ return params.ForgeEntity{}, fmt.Errorf("error fetching entity: %w", err)
+ }
+ return entity, nil
+}
+
+// getServiceNameForEntity builds the systemd service name under which a
+// runner for the given entity runs.
+func (r *Runner) getServiceNameForEntity(entity params.ForgeEntity) (string, error) {
+    switch entity.EntityType {
+    case params.ForgeEntityTypeEnterprise, params.ForgeEntityTypeOrganization:
+        return fmt.Sprintf("actions.runner.%s.%s", entity.Owner, entity.Name), nil
+    case params.ForgeEntityTypeRepository:
+        // NOTE(review): entity.Name is used for both the second and third
+        // placeholders here — confirm the repo-level service name really is
+        // "actions.runner.<owner>-<name>.<name>".
+        return fmt.Sprintf("actions.runner.%s-%s.%s", entity.Owner, entity.Name, entity.Name), nil
+    default:
+        return "", errors.New("unknown entity type")
+    }
+}
+
+// GetRunnerServiceName returns the systemd service name the calling runner
+// instance should register under. Access is limited to instances in the
+// pending or installing phase (enforced by validateInstanceState).
+func (r *Runner) GetRunnerServiceName(ctx context.Context) (string, error) {
+ instance, err := validateInstanceState(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ // The underlying error is logged but not returned to the caller.
+ return "", runnerErrors.ErrUnauthorized
+ }
+ entity, err := r.getForgeEntityFromInstance(ctx, instance)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return "", fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ serviceName, err := r.getServiceNameForEntity(entity)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get service name", "error", err)
+ return "", fmt.Errorf("error fetching service name: %w", err)
+ }
+ return serviceName, nil
+}
+
+// GenerateSystemdUnitFile renders a systemd unit file for the calling
+// runner instance, choosing the GitHub or Gitea template based on the forge
+// type of the entity the instance belongs to. runAsUser sets the unit's
+// User=; when empty it falls back to defaults.DefaultUser. Access is
+// limited to instances in the pending or installing phase.
+func (r *Runner) GenerateSystemdUnitFile(ctx context.Context, runAsUser string) ([]byte, error) {
+ instance, err := validateInstanceState(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return nil, runnerErrors.ErrUnauthorized
+ }
+ entity, err := r.getForgeEntityFromInstance(ctx, instance)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ serviceName, err := r.getServiceNameForEntity(entity)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get service name", "error", err)
+ return nil, fmt.Errorf("error fetching service name: %w", err)
+ }
+
+ // Pick the unit template matching the forge type of the entity's
+ // credentials. NOTE(review): this file imports html/template; for
+ // non-HTML output like a systemd unit, text/template would avoid
+ // HTML-escaping of expanded values — confirm which is intended.
+ var unitTemplate *template.Template
+ switch entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ unitTemplate, err = template.New("").Parse(githubSystemdUnitTemplate)
+ case params.GiteaEndpointType:
+ unitTemplate, err = template.New("").Parse(giteaSystemdUnitTemplate)
+ default:
+ slog.ErrorContext(r.ctx, "unknown forge type", "forge_type", entity.Credentials.ForgeType)
+ return nil, errors.New("unknown forge type")
+ }
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to parse template", "error", err)
+ return nil, fmt.Errorf("error parsing template: %w", err)
+ }
+
+ // Default the service user when the caller did not specify one.
+ if runAsUser == "" {
+ runAsUser = defaults.DefaultUser
+ }
+
+ data := struct {
+ ServiceName string
+ RunAsUser string
+ }{
+ ServiceName: serviceName,
+ RunAsUser: runAsUser,
+ }
+
+ var unitFile bytes.Buffer
+ if err := unitTemplate.Execute(&unitFile, data); err != nil {
+ slog.ErrorContext(r.ctx, "failed to execute template", "error", err)
+ return nil, fmt.Errorf("error executing template: %w", err)
+ }
+ return unitFile.Bytes(), nil
+}
+
+// GetJITConfigFile returns the decoded contents of one just-in-time
+// configuration file stored for the calling instance. It returns a
+// not-found error when the instance has no JIT config or when the named
+// file is absent from it.
+func (r *Runner) GetJITConfigFile(ctx context.Context, file string) ([]byte, error) {
+    if !auth.InstanceHasJITConfig(ctx) {
+        return nil, runnerErrors.NewNotFoundError("instance not configured for JIT")
+    }
+
+    instance, err := validateInstanceState(ctx)
+    if err != nil {
+        slog.With(slog.Any("error", err)).ErrorContext(
+            ctx, "failed to get instance params")
+        return nil, runnerErrors.ErrUnauthorized
+    }
+
+    encoded, ok := instance.JitConfiguration[file]
+    if !ok {
+        return nil, runnerErrors.NewNotFoundError("could not find file %q", file)
+    }
+
+    // JIT config files are stored base64-encoded; hand back raw bytes.
+    decoded, err := base64.StdEncoding.DecodeString(encoded)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding file contents: %w", err)
+    }
+    return decoded, nil
+}
+
+// GetInstanceGithubRegistrationToken hands a runner registration token to
+// the calling instance. This is a one-shot operation for non-JIT runners:
+// after a successful call the instance's token_fetched flag is set and
+// further calls are rejected with ErrUnauthorized.
+func (r *Runner) GetInstanceGithubRegistrationToken(ctx context.Context) (string, error) {
+ // Check if this instance already fetched a registration token or if it was configured using
+ // the new Just In Time runner feature. If we're still using the old way of configuring a runner,
+ // we only allow an instance to fetch one token. If the instance fails to bootstrap after a token
+ // is fetched, we reset the token fetched field when re-queueing the instance.
+ if auth.InstanceTokenFetched(ctx) || auth.InstanceHasJITConfig(ctx) {
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ // Only instances still bootstrapping may request a token.
+ status := auth.InstanceRunnerStatus(ctx)
+ if status != params.RunnerPending && status != params.RunnerInstalling {
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ instance, err := auth.InstanceParams(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return "", runnerErrors.ErrUnauthorized
+ }
+
+ poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ if err != nil {
+ return "", fmt.Errorf("error fetching pool manager for instance: %w", err)
+ }
+
+ token, err := poolMgr.GithubRunnerRegistrationToken()
+ if err != nil {
+ return "", fmt.Errorf("error fetching runner token: %w", err)
+ }
+
+ // Mark the token as consumed so subsequent calls are rejected.
+ tokenFetched := true
+ updateParams := params.UpdateInstanceParams{
+ TokenFetched: &tokenFetched,
+ }
+
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateParams); err != nil {
+ return "", fmt.Errorf("error setting token_fetched for instance: %w", err)
+ }
+
+ // Record the fetch in the instance's event log for auditability.
+ if err := r.store.AddInstanceEvent(ctx, instance.Name, params.FetchTokenEvent, params.EventInfo, "runner registration token was retrieved"); err != nil {
+ return "", fmt.Errorf("error recording event: %w", err)
+ }
+
+ return token, nil
+}
+
+// GetRootCertificateBundle returns the root CA bundle configured on the
+// instance's pool manager. This is best-effort: if the bundle cannot be
+// assembled, the error is logged and an empty bundle is returned with a
+// nil error so the runner can proceed without extra CAs.
+func (r *Runner) GetRootCertificateBundle(ctx context.Context) (params.CertificateBundle, error) {
+ instance, err := auth.InstanceParams(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get instance params")
+ return params.CertificateBundle{}, runnerErrors.ErrUnauthorized
+ }
+
+ poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ if err != nil {
+ return params.CertificateBundle{}, fmt.Errorf("error fetching pool manager for instance: %w", err)
+ }
+
+ bundle, err := poolMgr.RootCABundle()
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to get root CA bundle",
+ "instance", instance.Name,
+ "pool_manager", poolMgr.ID())
+ // The root CA bundle is invalid. Return an empty bundle to the runner and log the event.
+ return params.CertificateBundle{
+ RootCertificates: make(map[string][]byte),
+ }, nil
+ }
+ return bundle, nil
+}
diff --git a/runner/metrics/enterprise.go b/runner/metrics/enterprise.go
new file mode 100644
index 00000000..be6eba66
--- /dev/null
+++ b/runner/metrics/enterprise.go
@@ -0,0 +1,50 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner" //nolint:typecheck
+)
+
+// CollectEnterpriseMetric collects the metrics for the enterprise objects.
+// (The previous comment named CollectOrganizationMetric — copy-paste slip.)
+func CollectEnterpriseMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics so enterprises deleted since the last pass stop being
+ // exported
+ metrics.EnterpriseInfo.Reset()
+ metrics.EnterprisePoolManagerStatus.Reset()
+
+ enterprises, err := r.ListEnterprises(ctx, params.EnterpriseFilter{})
+ if err != nil {
+ return err
+ }
+
+ for _, enterprise := range enterprises {
+ metrics.EnterpriseInfo.WithLabelValues(
+ enterprise.Name, // label: name
+ enterprise.ID, // label: id
+ ).Set(1)
+
+ metrics.EnterprisePoolManagerStatus.WithLabelValues(
+ enterprise.Name, // label: name
+ enterprise.ID, // label: id
+ strconv.FormatBool(enterprise.PoolManagerStatus.IsRunning), // label: running
+ ).Set(metrics.Bool2float64(enterprise.PoolManagerStatus.IsRunning))
+ }
+ return nil
+}
diff --git a/runner/metrics/health.go b/runner/metrics/health.go
new file mode 100644
index 00000000..fcd254df
--- /dev/null
+++ b/runner/metrics/health.go
@@ -0,0 +1,31 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+)
+
+// CollectHealthMetric exports a single gauge (value 1) whose labels carry
+// the controller's configured URLs and ID. The returned error is always
+// nil.
+func CollectHealthMetric(controllerInfo params.ControllerInfo) error {
+ metrics.GarmHealth.WithLabelValues(
+ controllerInfo.MetadataURL, // label: metadata_url
+ controllerInfo.CallbackURL, // label: callback_url
+ controllerInfo.WebhookURL, // label: webhook_url
+ controllerInfo.ControllerWebhookURL, // label: controller_webhook_url
+ controllerInfo.ControllerID.String(), // label: controller_id
+ ).Set(1)
+ return nil
+}
diff --git a/runner/metrics/instance.go b/runner/metrics/instance.go
new file mode 100644
index 00000000..bc6bed0a
--- /dev/null
+++ b/runner/metrics/instance.go
@@ -0,0 +1,79 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectInstanceMetric collects the metrics for the runner instances
+// reflecting the statuses and the pool they belong to.
+//
+// Fix: poolInfo.ProviderName was declared and used as the "provider" label
+// but never assigned, so the label was always empty. It is now populated
+// from pool.ProviderName.
+func CollectInstanceMetric(ctx context.Context, r *runner.Runner) error {
+    // reset metrics so instances removed since the last pass stop being
+    // exported
+    metrics.InstanceStatus.Reset()
+
+    instances, err := r.ListAllInstances(ctx)
+    if err != nil {
+        return err
+    }
+
+    pools, err := r.ListAllPools(ctx)
+    if err != nil {
+        return err
+    }
+
+    // poolInfo carries the per-pool label values attached to each instance.
+    type poolInfo struct {
+        Name         string
+        Type         string
+        ProviderName string
+    }
+
+    poolNames := make(map[string]poolInfo)
+    for _, pool := range pools {
+        info := poolInfo{
+            Type:         string(pool.PoolType()),
+            ProviderName: pool.ProviderName,
+        }
+        // The pool owner is whichever of org/enterprise/repo the pool is
+        // attached to.
+        switch {
+        case pool.OrgName != "":
+            info.Name = pool.OrgName
+        case pool.EnterpriseName != "":
+            info.Name = pool.EnterpriseName
+        default:
+            info.Name = pool.RepoName
+        }
+        poolNames[pool.ID] = info
+    }
+
+    for _, instance := range instances {
+        metrics.InstanceStatus.WithLabelValues(
+            instance.Name,                           // label: name
+            string(instance.Status),                 // label: status
+            string(instance.RunnerStatus),           // label: runner_status
+            poolNames[instance.PoolID].Name,         // label: pool_owner
+            poolNames[instance.PoolID].Type,         // label: pool_type
+            instance.PoolID,                         // label: pool_id
+            poolNames[instance.PoolID].ProviderName, // label: provider
+        ).Set(1)
+    }
+    return nil
+}
diff --git a/runner/metrics/metrics.go b/runner/metrics/metrics.go
new file mode 100644
index 00000000..772ba86a
--- /dev/null
+++ b/runner/metrics/metrics.go
@@ -0,0 +1,104 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "log/slog"
+ "time"
+
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectObjectMetric performs one immediate metrics collection pass and
+// then starts a goroutine that repeats the pass every `duration` until ctx
+// is cancelled. Collection runs under an admin context.
+func CollectObjectMetric(ctx context.Context, r *runner.Runner, duration time.Duration) {
+ ctx = auth.GetAdminContext(ctx)
+
+ // get controller info for health metrics
+ // NOTE(review): controllerInfo is fetched once and reused on every tick;
+ // if it changes at runtime (or this call fails, leaving a zero value),
+ // the health metric labels stay stale until restart — confirm intended.
+ controllerInfo, err := r.GetControllerInfo(ctx)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot get controller info")
+ }
+
+ // we do not want to wait until the first ticker happens
+ // for that we start an initial collection immediately
+ slog.DebugContext(ctx, "collecting metrics")
+ if err := collectMetrics(ctx, r, controllerInfo); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot collect metrics")
+ }
+
+ go func() {
+ ticker := time.NewTicker(duration)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ slog.DebugContext(ctx, "collecting metrics")
+
+ // Collection errors are logged and the loop keeps running.
+ if err := collectMetrics(ctx, r, controllerInfo); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(ctx, "cannot collect metrics")
+ }
+ }
+ }
+ }()
+}
+
+// collectMetrics runs a single pass of every metric collector, stopping at
+// the first failure and returning its error.
+func collectMetrics(ctx context.Context, r *runner.Runner, controllerInfo params.ControllerInfo) error {
+    slog.DebugContext(ctx, "collecting organization metrics")
+    if err := CollectOrganizationMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting enterprise metrics")
+    if err := CollectEnterpriseMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting repository metrics")
+    if err := CollectRepositoryMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting provider metrics")
+    if err := CollectProviderMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting pool metrics")
+    if err := CollectPoolMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting instance metrics")
+    if err := CollectInstanceMetric(ctx, r); err != nil {
+        return err
+    }
+
+    slog.DebugContext(ctx, "collecting health metrics")
+    return CollectHealthMetric(controllerInfo)
+}
diff --git a/runner/metrics/organization.go b/runner/metrics/organization.go
new file mode 100644
index 00000000..6bf6d9e5
--- /dev/null
+++ b/runner/metrics/organization.go
@@ -0,0 +1,50 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectOrganizationMetric collects the metrics for the organization objects.
+func CollectOrganizationMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics so organizations deleted since the last pass stop being
+ // exported
+ metrics.OrganizationInfo.Reset()
+ metrics.OrganizationPoolManagerStatus.Reset()
+
+ organizations, err := r.ListOrganizations(ctx, params.OrganizationFilter{})
+ if err != nil {
+ return err
+ }
+
+ for _, organization := range organizations {
+ metrics.OrganizationInfo.WithLabelValues(
+ organization.Name, // label: name
+ organization.ID, // label: id
+ ).Set(1)
+
+ metrics.OrganizationPoolManagerStatus.WithLabelValues(
+ organization.Name, // label: name
+ organization.ID, // label: id
+ strconv.FormatBool(organization.PoolManagerStatus.IsRunning), // label: running
+ ).Set(metrics.Bool2float64(organization.PoolManagerStatus.IsRunning))
+ }
+ return nil
+}
diff --git a/runner/metrics/pool.go b/runner/metrics/pool.go
new file mode 100644
index 00000000..6b06a8b9
--- /dev/null
+++ b/runner/metrics/pool.go
@@ -0,0 +1,101 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectPoolMetric collects the metrics for the pool objects
+func CollectPoolMetric(ctx context.Context, r *runner.Runner) error {
+ // reset metrics so pools deleted since the last pass stop being exported
+ metrics.PoolInfo.Reset()
+ metrics.PoolStatus.Reset()
+ metrics.PoolMaxRunners.Reset()
+ metrics.PoolMinIdleRunners.Reset()
+ metrics.PoolBootstrapTimeout.Reset()
+
+ pools, err := r.ListAllPools(ctx)
+ if err != nil {
+ return err
+ }
+
+ // poolInfo carries the owner name and type labels for a pool.
+ type poolInfo struct {
+ Name string
+ Type string
+ }
+
+ // NOTE(review): each poolNames entry is written and then read within the
+ // same loop iteration only, so a plain local would suffice — presumably
+ // kept as a map for parity with CollectInstanceMetric.
+ poolNames := make(map[string]poolInfo)
+ for _, pool := range pools {
+ // The pool owner is whichever of org/enterprise/repo the pool is
+ // attached to.
+ switch {
+ case pool.OrgName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.OrgName,
+ Type: string(pool.PoolType()),
+ }
+ case pool.EnterpriseName != "":
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.EnterpriseName,
+ Type: string(pool.PoolType()),
+ }
+ default:
+ poolNames[pool.ID] = poolInfo{
+ Name: pool.RepoName,
+ Type: string(pool.PoolType()),
+ }
+ }
+
+ var poolTags []string
+ for _, tag := range pool.Tags {
+ poolTags = append(poolTags, tag.Name)
+ }
+
+ metrics.PoolInfo.WithLabelValues(
+ pool.ID, // label: id
+ pool.Image, // label: image
+ pool.Flavor, // label: flavor
+ pool.Prefix, // label: prefix
+ string(pool.OSType), // label: os_type
+ string(pool.OSArch), // label: os_arch
+ strings.Join(poolTags, ","), // label: tags
+ pool.ProviderName, // label: provider
+ poolNames[pool.ID].Name, // label: pool_owner
+ poolNames[pool.ID].Type, // label: pool_type
+ ).Set(1)
+
+ metrics.PoolStatus.WithLabelValues(
+ pool.ID, // label: id
+ strconv.FormatBool(pool.Enabled), // label: enabled
+ ).Set(metrics.Bool2float64(pool.Enabled))
+
+ metrics.PoolMaxRunners.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.MaxRunners))
+
+ metrics.PoolMinIdleRunners.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.MinIdleRunners))
+
+ metrics.PoolBootstrapTimeout.WithLabelValues(
+ pool.ID, // label: id
+ ).Set(float64(pool.RunnerBootstrapTimeout))
+ }
+ return nil
+}
diff --git a/runner/metrics/provider.go b/runner/metrics/provider.go
new file mode 100644
index 00000000..1d7a065d
--- /dev/null
+++ b/runner/metrics/provider.go
@@ -0,0 +1,40 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectProviderMetric collects the metrics for the configured providers.
+func CollectProviderMetric(ctx context.Context, r *runner.Runner) error {
+    // Clear label sets exported on previous passes before re-populating.
+    metrics.ProviderInfo.Reset()
+
+    providers, err := r.ListProviders(ctx)
+    if err != nil {
+        return err
+    }
+
+    for _, p := range providers {
+        metrics.ProviderInfo.WithLabelValues(
+            p.Name,                 // label: name
+            string(p.ProviderType), // label: type
+            p.Description,          // label: description
+        ).Set(1)
+    }
+    return nil
+}
diff --git a/runner/metrics/repository.go b/runner/metrics/repository.go
new file mode 100644
index 00000000..a2e8fa57
--- /dev/null
+++ b/runner/metrics/repository.go
@@ -0,0 +1,49 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package metrics
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner"
+)
+
+// CollectRepositoryMetric collects the metrics for the repository objects.
+//
+// Fix: this collector previously reset and populated the *enterprise*
+// metrics (EnterpriseInfo / EnterprisePoolManagerStatus), clobbering the
+// values written by CollectEnterpriseMetric on every collection pass.
+// It now uses the repository metrics.
+func CollectRepositoryMetric(ctx context.Context, r *runner.Runner) error {
+    // reset metrics so repositories deleted since the last pass stop being
+    // exported
+    metrics.RepositoryInfo.Reset()
+    metrics.RepositoryPoolManagerStatus.Reset()
+
+    repositories, err := r.ListRepositories(ctx, params.RepositoryFilter{})
+    if err != nil {
+        return err
+    }
+
+    for _, repository := range repositories {
+        metrics.RepositoryInfo.WithLabelValues(
+            repository.Name, // label: name
+            repository.ID,   // label: id
+        ).Set(1)
+
+        metrics.RepositoryPoolManagerStatus.WithLabelValues(
+            repository.Name, // label: name
+            repository.ID,   // label: id
+            strconv.FormatBool(repository.PoolManagerStatus.IsRunning), // label: running
+        ).Set(metrics.Bool2float64(repository.PoolManagerStatus.IsRunning))
+    }
+    return nil
+}
diff --git a/runner/mocks/PoolManagerController.go b/runner/mocks/PoolManagerController.go
index 0727b1a2..b17196ec 100644
--- a/runner/mocks/PoolManagerController.go
+++ b/runner/mocks/PoolManagerController.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.22.1. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -19,10 +19,22 @@ type PoolManagerController struct {
mock.Mock
}
+type PoolManagerController_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *PoolManagerController) EXPECT() *PoolManagerController_Expecter {
+ return &PoolManagerController_Expecter{mock: &_m.Mock}
+}
+
// CreateEnterprisePoolManager provides a mock function with given fields: ctx, enterprise, providers, store
func (_m *PoolManagerController) CreateEnterprisePoolManager(ctx context.Context, enterprise params.Enterprise, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, enterprise, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateEnterprisePoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Enterprise, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -45,10 +57,45 @@ func (_m *PoolManagerController) CreateEnterprisePoolManager(ctx context.Context
return r0, r1
}
+// PoolManagerController_CreateEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateEnterprisePoolManager'
+type PoolManagerController_CreateEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateEnterprisePoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - enterprise params.Enterprise
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateEnterprisePoolManager(ctx interface{}, enterprise interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ return &PoolManagerController_CreateEnterprisePoolManager_Call{Call: _e.mock.On("CreateEnterprisePoolManager", ctx, enterprise, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) Run(run func(ctx context.Context, enterprise params.Enterprise, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Enterprise), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateEnterprisePoolManager_Call) RunAndReturn(run func(context.Context, params.Enterprise, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateOrgPoolManager provides a mock function with given fields: ctx, org, providers, store
func (_m *PoolManagerController) CreateOrgPoolManager(ctx context.Context, org params.Organization, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, org, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateOrgPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Organization, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -71,10 +118,45 @@ func (_m *PoolManagerController) CreateOrgPoolManager(ctx context.Context, org p
return r0, r1
}
+// PoolManagerController_CreateOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateOrgPoolManager'
+type PoolManagerController_CreateOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateOrgPoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - org params.Organization
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateOrgPoolManager(ctx interface{}, org interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateOrgPoolManager_Call {
+ return &PoolManagerController_CreateOrgPoolManager_Call{Call: _e.mock.On("CreateOrgPoolManager", ctx, org, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) Run(run func(ctx context.Context, org params.Organization, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Organization), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateOrgPoolManager_Call) RunAndReturn(run func(context.Context, params.Organization, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateRepoPoolManager provides a mock function with given fields: ctx, repo, providers, store
func (_m *PoolManagerController) CreateRepoPoolManager(ctx context.Context, repo params.Repository, providers map[string]common.Provider, store databasecommon.Store) (common.PoolManager, error) {
ret := _m.Called(ctx, repo, providers, store)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateRepoPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, params.Repository, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)); ok {
@@ -97,10 +179,45 @@ func (_m *PoolManagerController) CreateRepoPoolManager(ctx context.Context, repo
return r0, r1
}
+// PoolManagerController_CreateRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateRepoPoolManager'
+type PoolManagerController_CreateRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// CreateRepoPoolManager is a helper method to define mock.On call
+// - ctx context.Context
+// - repo params.Repository
+// - providers map[string]common.Provider
+// - store databasecommon.Store
+func (_e *PoolManagerController_Expecter) CreateRepoPoolManager(ctx interface{}, repo interface{}, providers interface{}, store interface{}) *PoolManagerController_CreateRepoPoolManager_Call {
+ return &PoolManagerController_CreateRepoPoolManager_Call{Call: _e.mock.On("CreateRepoPoolManager", ctx, repo, providers, store)}
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) Run(run func(ctx context.Context, repo params.Repository, providers map[string]common.Provider, store databasecommon.Store)) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(params.Repository), args[2].(map[string]common.Provider), args[3].(databasecommon.Store))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_CreateRepoPoolManager_Call) RunAndReturn(run func(context.Context, params.Repository, map[string]common.Provider, databasecommon.Store) (common.PoolManager, error)) *PoolManagerController_CreateRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteEnterprisePoolManager provides a mock function with given fields: enterprise
func (_m *PoolManagerController) DeleteEnterprisePoolManager(enterprise params.Enterprise) error {
ret := _m.Called(enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteEnterprisePoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Enterprise) error); ok {
r0 = rf(enterprise)
@@ -111,10 +228,42 @@ func (_m *PoolManagerController) DeleteEnterprisePoolManager(enterprise params.E
return r0
}
+// PoolManagerController_DeleteEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEnterprisePoolManager'
+type PoolManagerController_DeleteEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteEnterprisePoolManager is a helper method to define mock.On call
+// - enterprise params.Enterprise
+func (_e *PoolManagerController_Expecter) DeleteEnterprisePoolManager(enterprise interface{}) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ return &PoolManagerController_DeleteEnterprisePoolManager_Call{Call: _e.mock.On("DeleteEnterprisePoolManager", enterprise)}
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) Run(run func(enterprise params.Enterprise)) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Enterprise))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteEnterprisePoolManager_Call) RunAndReturn(run func(params.Enterprise) error) *PoolManagerController_DeleteEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteOrgPoolManager provides a mock function with given fields: org
func (_m *PoolManagerController) DeleteOrgPoolManager(org params.Organization) error {
ret := _m.Called(org)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteOrgPoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Organization) error); ok {
r0 = rf(org)
@@ -125,10 +274,42 @@ func (_m *PoolManagerController) DeleteOrgPoolManager(org params.Organization) e
return r0
}
+// PoolManagerController_DeleteOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteOrgPoolManager'
+type PoolManagerController_DeleteOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteOrgPoolManager is a helper method to define mock.On call
+// - org params.Organization
+func (_e *PoolManagerController_Expecter) DeleteOrgPoolManager(org interface{}) *PoolManagerController_DeleteOrgPoolManager_Call {
+ return &PoolManagerController_DeleteOrgPoolManager_Call{Call: _e.mock.On("DeleteOrgPoolManager", org)}
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) Run(run func(org params.Organization)) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Organization))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteOrgPoolManager_Call) RunAndReturn(run func(params.Organization) error) *PoolManagerController_DeleteOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteRepoPoolManager provides a mock function with given fields: repo
func (_m *PoolManagerController) DeleteRepoPoolManager(repo params.Repository) error {
ret := _m.Called(repo)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteRepoPoolManager")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(params.Repository) error); ok {
r0 = rf(repo)
@@ -139,10 +320,42 @@ func (_m *PoolManagerController) DeleteRepoPoolManager(repo params.Repository) e
return r0
}
+// PoolManagerController_DeleteRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteRepoPoolManager'
+type PoolManagerController_DeleteRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// DeleteRepoPoolManager is a helper method to define mock.On call
+// - repo params.Repository
+func (_e *PoolManagerController_Expecter) DeleteRepoPoolManager(repo interface{}) *PoolManagerController_DeleteRepoPoolManager_Call {
+ return &PoolManagerController_DeleteRepoPoolManager_Call{Call: _e.mock.On("DeleteRepoPoolManager", repo)}
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) Run(run func(repo params.Repository)) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Repository))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) Return(_a0 error) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *PoolManagerController_DeleteRepoPoolManager_Call) RunAndReturn(run func(params.Repository) error) *PoolManagerController_DeleteRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetEnterprisePoolManager provides a mock function with given fields: enterprise
func (_m *PoolManagerController) GetEnterprisePoolManager(enterprise params.Enterprise) (common.PoolManager, error) {
ret := _m.Called(enterprise)
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprisePoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Enterprise) (common.PoolManager, error)); ok {
@@ -165,10 +378,42 @@ func (_m *PoolManagerController) GetEnterprisePoolManager(enterprise params.Ente
return r0, r1
}
-// GetEnterprisePoolManagers provides a mock function with given fields:
+// PoolManagerController_GetEnterprisePoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprisePoolManager'
+type PoolManagerController_GetEnterprisePoolManager_Call struct {
+ *mock.Call
+}
+
+// GetEnterprisePoolManager is a helper method to define mock.On call
+// - enterprise params.Enterprise
+func (_e *PoolManagerController_Expecter) GetEnterprisePoolManager(enterprise interface{}) *PoolManagerController_GetEnterprisePoolManager_Call {
+ return &PoolManagerController_GetEnterprisePoolManager_Call{Call: _e.mock.On("GetEnterprisePoolManager", enterprise)}
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) Run(run func(enterprise params.Enterprise)) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Enterprise))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManager_Call) RunAndReturn(run func(params.Enterprise) (common.PoolManager, error)) *PoolManagerController_GetEnterprisePoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetEnterprisePoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetEnterprisePoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetEnterprisePoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -191,10 +436,41 @@ func (_m *PoolManagerController) GetEnterprisePoolManagers() (map[string]common.
return r0, r1
}
+// PoolManagerController_GetEnterprisePoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnterprisePoolManagers'
+type PoolManagerController_GetEnterprisePoolManagers_Call struct {
+ *mock.Call
+}
+
+// GetEnterprisePoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetEnterprisePoolManagers() *PoolManagerController_GetEnterprisePoolManagers_Call {
+ return &PoolManagerController_GetEnterprisePoolManagers_Call{Call: _e.mock.On("GetEnterprisePoolManagers")}
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) Run(run func()) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetEnterprisePoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetEnterprisePoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetOrgPoolManager provides a mock function with given fields: org
func (_m *PoolManagerController) GetOrgPoolManager(org params.Organization) (common.PoolManager, error) {
ret := _m.Called(org)
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Organization) (common.PoolManager, error)); ok {
@@ -217,10 +493,42 @@ func (_m *PoolManagerController) GetOrgPoolManager(org params.Organization) (com
return r0, r1
}
-// GetOrgPoolManagers provides a mock function with given fields:
+// PoolManagerController_GetOrgPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrgPoolManager'
+type PoolManagerController_GetOrgPoolManager_Call struct {
+ *mock.Call
+}
+
+// GetOrgPoolManager is a helper method to define mock.On call
+// - org params.Organization
+func (_e *PoolManagerController_Expecter) GetOrgPoolManager(org interface{}) *PoolManagerController_GetOrgPoolManager_Call {
+ return &PoolManagerController_GetOrgPoolManager_Call{Call: _e.mock.On("GetOrgPoolManager", org)}
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) Run(run func(org params.Organization)) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Organization))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManager_Call) RunAndReturn(run func(params.Organization) (common.PoolManager, error)) *PoolManagerController_GetOrgPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetOrgPoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetOrgPoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrgPoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -243,10 +551,41 @@ func (_m *PoolManagerController) GetOrgPoolManagers() (map[string]common.PoolMan
return r0, r1
}
+// PoolManagerController_GetOrgPoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrgPoolManagers'
+type PoolManagerController_GetOrgPoolManagers_Call struct {
+ *mock.Call
+}
+
+// GetOrgPoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetOrgPoolManagers() *PoolManagerController_GetOrgPoolManagers_Call {
+ return &PoolManagerController_GetOrgPoolManagers_Call{Call: _e.mock.On("GetOrgPoolManagers")}
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) Run(run func()) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetOrgPoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetOrgPoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetRepoPoolManager provides a mock function with given fields: repo
func (_m *PoolManagerController) GetRepoPoolManager(repo params.Repository) (common.PoolManager, error) {
ret := _m.Called(repo)
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoPoolManager")
+ }
+
var r0 common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func(params.Repository) (common.PoolManager, error)); ok {
@@ -269,10 +608,42 @@ func (_m *PoolManagerController) GetRepoPoolManager(repo params.Repository) (com
return r0, r1
}
-// GetRepoPoolManagers provides a mock function with given fields:
+// PoolManagerController_GetRepoPoolManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepoPoolManager'
+type PoolManagerController_GetRepoPoolManager_Call struct {
+ *mock.Call
+}
+
+// GetRepoPoolManager is a helper method to define mock.On call
+// - repo params.Repository
+func (_e *PoolManagerController_Expecter) GetRepoPoolManager(repo interface{}) *PoolManagerController_GetRepoPoolManager_Call {
+ return &PoolManagerController_GetRepoPoolManager_Call{Call: _e.mock.On("GetRepoPoolManager", repo)}
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) Run(run func(repo params.Repository)) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(params.Repository))
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) Return(_a0 common.PoolManager, _a1 error) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManager_Call) RunAndReturn(run func(params.Repository) (common.PoolManager, error)) *PoolManagerController_GetRepoPoolManager_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetRepoPoolManagers provides a mock function with no fields
func (_m *PoolManagerController) GetRepoPoolManagers() (map[string]common.PoolManager, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for GetRepoPoolManagers")
+ }
+
var r0 map[string]common.PoolManager
var r1 error
if rf, ok := ret.Get(0).(func() (map[string]common.PoolManager, error)); ok {
@@ -295,13 +666,39 @@ func (_m *PoolManagerController) GetRepoPoolManagers() (map[string]common.PoolMa
return r0, r1
}
-type mockConstructorTestingTNewPoolManagerController interface {
- mock.TestingT
- Cleanup(func())
+// PoolManagerController_GetRepoPoolManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepoPoolManagers'
+type PoolManagerController_GetRepoPoolManagers_Call struct {
+ *mock.Call
+}
+
+// GetRepoPoolManagers is a helper method to define mock.On call
+func (_e *PoolManagerController_Expecter) GetRepoPoolManagers() *PoolManagerController_GetRepoPoolManagers_Call {
+ return &PoolManagerController_GetRepoPoolManagers_Call{Call: _e.mock.On("GetRepoPoolManagers")}
+}
+
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) Run(run func()) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) Return(_a0 map[string]common.PoolManager, _a1 error) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *PoolManagerController_GetRepoPoolManagers_Call) RunAndReturn(run func() (map[string]common.PoolManager, error)) *PoolManagerController_GetRepoPoolManagers_Call {
+ _c.Call.Return(run)
+ return _c
}
// NewPoolManagerController creates a new instance of PoolManagerController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewPoolManagerController(t mockConstructorTestingTNewPoolManagerController) *PoolManagerController {
+// The first argument is typically a *testing.T value.
+func NewPoolManagerController(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *PoolManagerController {
mock := &PoolManagerController{}
mock.Mock.Test(t)
diff --git a/runner/organizations.go b/runner/organizations.go
index 6d6cca36..ffdd1c6c 100644
--- a/runner/organizations.go
+++ b/runner/organizations.go
@@ -16,17 +16,16 @@ package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateOrganization(ctx context.Context, param params.CreateOrgParams) (org params.Organization, err error) {
@@ -35,57 +34,74 @@ func (r *Runner) CreateOrganization(ctx context.Context, param params.CreateOrgP
}
if err := param.Validate(); err != nil {
- return params.Organization{}, errors.Wrap(err, "validating params")
+ return params.Organization{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
+ var creds params.ForgeCredentials
+ switch param.ForgeType {
+ case params.GithubEndpointType:
+ slog.DebugContext(ctx, "getting github credentials")
+ creds, err = r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ case params.GiteaEndpointType:
+ slog.DebugContext(ctx, "getting gitea credentials")
+ creds, err = r.store.GetGiteaCredentialsByName(ctx, param.CredentialsName, true)
+ default:
+ creds, err = r.ResolveForgeCredentialByName(ctx, param.CredentialsName)
+ }
+
+ if err != nil {
return params.Organization{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetOrganization(ctx, param.Name)
+ _, err = r.store.GetOrganization(ctx, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ return params.Organization{}, fmt.Errorf("error fetching org: %w", err)
}
} else {
return params.Organization{}, runnerErrors.NewConflictError("organization %s already exists", param.Name)
}
- org, err = r.store.CreateOrganization(ctx, param.Name, creds.Name, param.WebhookSecret)
+ org, err = r.store.CreateOrganization(ctx, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating organization")
+ return params.Organization{}, fmt.Errorf("error creating organization: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteOrganization(ctx, org.ID); deleteErr != nil {
- log.Printf("failed to delete org: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete org",
+ "org_id", org.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
poolMgr, err := r.poolManagerCtrl.CreateOrgPoolManager(r.ctx, org, r.providers, r.store)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "creating org pool manager")
+ return params.Organization{}, fmt.Errorf("error creating org pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteOrgPoolManager(org); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for org %s", org.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for org",
+ "org_id", org.ID)
}
- return params.Organization{}, errors.Wrap(err, "starting org pool manager")
+ return params.Organization{}, fmt.Errorf("error starting org pool manager: %w", err)
}
return org, nil
}
-func (r *Runner) ListOrganizations(ctx context.Context) ([]params.Organization, error) {
+func (r *Runner) ListOrganizations(ctx context.Context, filter params.OrganizationFilter) ([]params.Organization, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- orgs, err := r.store.ListOrganizations(ctx)
+ orgs, err := r.store.ListOrganizations(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing organizations")
+ return nil, fmt.Errorf("error listing organizations: %w", err)
}
var allOrgs []params.Organization
@@ -112,7 +128,7 @@ func (r *Runner) GetOrganizationByID(ctx context.Context, orgID string) (params.
org, err := r.store.GetOrganizationByID(ctx, orgID)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching organization")
+ return params.Organization{}, fmt.Errorf("error fetching organization: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
@@ -124,41 +140,70 @@ func (r *Runner) GetOrganizationByID(ctx context.Context, orgID string) (params.
return org, nil
}
-func (r *Runner) DeleteOrganization(ctx context.Context, orgID string) error {
+func (r *Runner) DeleteOrganization(ctx context.Context, orgID string, keepWebhook bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
org, err := r.store.GetOrganizationByID(ctx, orgID)
if err != nil {
- return errors.Wrap(err, "fetching org")
+ return fmt.Errorf("error fetching org: %w", err)
}
- pools, err := r.store.ListOrgPools(ctx, orgID)
+ entity, err := org.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching org pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching org pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("org has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("org has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching organization scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("organization has scale sets defined; delete them first")
+ }
+
+ if !keepWebhook && r.config.Default.EnableWebhookManagement {
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): Should we error out here?
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to uninstall webhook",
+ "org_id", org.ID)
+ }
}
if err := r.poolManagerCtrl.DeleteOrgPoolManager(org); err != nil {
- return errors.Wrap(err, "deleting org pool manager")
+ return fmt.Errorf("error deleting org pool manager: %w", err)
}
if err := r.store.DeleteOrganization(ctx, orgID); err != nil {
- return errors.Wrapf(err, "removing organization %s", orgID)
+ return fmt.Errorf("error removing organization %s: %w", orgID, err)
}
return nil
}
-func (r *Runner) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateRepositoryParams) (params.Organization, error) {
+func (r *Runner) UpdateOrganization(ctx context.Context, orgID string, param params.UpdateEntityParams) (params.Organization, error) {
if !auth.IsAdmin(ctx) {
return params.Organization{}, runnerErrors.ErrUnauthorized
}
@@ -166,38 +211,23 @@ func (r *Runner) UpdateOrganization(ctx context.Context, orgID string, param par
r.mux.Lock()
defer r.mux.Unlock()
- org, err := r.store.GetOrganizationByID(ctx, orgID)
- if err != nil {
- return params.Organization{}, errors.Wrap(err, "fetching org")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Organization{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Organization{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for org %s", param.CredentialsName, org.Name)
- }
- }
-
- org, err = r.store.UpdateOrganization(ctx, orgID, param)
+ org, err := r.store.UpdateOrganization(ctx, orgID, param)
if err != nil {
- return params.Organization{}, errors.Wrap(err, "updating org")
+ return params.Organization{}, fmt.Errorf("error updating org: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
if err != nil {
- newState := params.UpdatePoolStateParams{
- WebhookSecret: org.WebhookSecret,
- }
- // stop the pool mgr
- if err := poolMgr.RefreshState(newState); err != nil {
- return params.Organization{}, errors.Wrap(err, "updating org pool manager")
- }
- } else {
- if _, err := r.poolManagerCtrl.CreateOrgPoolManager(r.ctx, org, r.providers, r.store); err != nil {
- return params.Organization{}, errors.Wrap(err, "creating org pool manager")
- }
+ return params.Organization{}, fmt.Errorf("failed to get org pool manager: %w", err)
}
+ org.PoolManagerStatus = poolMgr.Status()
return org, nil
}
@@ -206,30 +236,23 @@ func (r *Runner) CreateOrgPool(ctx context.Context, orgID string, param params.C
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- org, err := r.store.GetOrganizationByID(ctx, orgID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching org")
- }
-
- if _, err := r.poolManagerCtrl.GetOrgPoolManager(org); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("error fetching pool params: %w", err)
}
if param.RunnerBootstrapTimeout == 0 {
param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateOrganizationPool(ctx, orgID, createPoolParams)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("error creating pool: %w", err)
}
return pool, nil
@@ -240,10 +263,16 @@ func (r *Runner) GetOrgPoolByID(ctx context.Context, orgID, poolID string) (para
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
}
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
+ }
+
return pool, nil
}
@@ -252,28 +281,31 @@ func (r *Runner) DeleteOrgPool(ctx context.Context, orgID, poolID string) error
return runnerErrors.ErrUnauthorized
}
- // TODO: dedup instance count verification
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
- if err != nil {
- return errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
}
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching instances")
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error fetching pool: %w", err)
+ }
+ return nil
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteOrganizationPool(ctx, orgID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -282,10 +314,13 @@ func (r *Runner) ListOrgPools(ctx context.Context, orgID string) ([]params.Pool,
if !auth.IsAdmin(ctx) {
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pools, err := r.store.ListOrgPools(ctx, orgID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -295,9 +330,14 @@ func (r *Runner) UpdateOrgPool(ctx context.Context, orgID, poolID string, param
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetOrganizationPool(ctx, orgID, poolID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -314,9 +354,9 @@ func (r *Runner) UpdateOrgPool(ctx context.Context, orgID, poolID string, param
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateOrganizationPool(ctx, orgID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -326,25 +366,95 @@ func (r *Runner) ListOrgInstances(ctx context.Context, orgID string) ([]params.I
return nil, runnerErrors.ErrUnauthorized
}
- instances, err := r.store.ListOrgInstances(ctx, orgID)
+ entity := params.ForgeEntity{
+ ID: orgID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findOrgPoolManager(name string) (common.PoolManager, error) {
+func (r *Runner) findOrgPoolManager(name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- org, err := r.store.GetOrganization(r.ctx, name)
+ org, err := r.store.GetOrganization(r.ctx, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetOrgPoolManager(org)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for org")
+ return nil, fmt.Errorf("error fetching pool manager for org: %w", err)
}
return poolManager, nil
}
+
+func (r *Runner) InstallOrgWebhook(ctx context.Context, orgID string, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ info, err := poolMgr.InstallWebhook(ctx, param)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error installing webhook: %w", err)
+ }
+ return info, nil
+}
+
+func (r *Runner) UninstallOrgWebhook(ctx context.Context, orgID string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ return fmt.Errorf("error uninstalling webhook: %w", err)
+ }
+ return nil
+}
+
+func (r *Runner) GetOrgWebhookInfo(ctx context.Context, orgID string) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ org, err := r.store.GetOrganizationByID(ctx, orgID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching org: %w", err)
+ }
+
+ poolMgr, err := r.poolManagerCtrl.GetOrgPoolManager(org)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for org: %w", err)
+ }
+
+ info, err := poolMgr.GetWebhookInfo(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching webhook info: %w", err)
+ }
+ return info, nil
+}
diff --git a/runner/organizations_test.go b/runner/organizations_test.go
index 0ce67a97..8d2aa3f6 100644
--- a/runner/organizations_test.go
+++ b/runner/organizations_test.go
@@ -16,59 +16,69 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
garmTesting "github.com/cloudbase/garm/internal/testing"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type OrgTestFixtures struct {
- AdminContext context.Context
- DBFile string
- Store dbCommon.Store
- StoreOrgs map[string]params.Organization
- Providers map[string]common.Provider
- Credentials map[string]config.Github
- CreateOrgParams params.CreateOrgParams
- CreatePoolParams params.CreatePoolParams
- CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
- UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
- ErrMock error
- ProviderMock *runnerCommonMocks.Provider
- PoolMgrMock *runnerCommonMocks.PoolManager
- PoolMgrCtrlMock *runnerMocks.PoolManagerController
+ AdminContext context.Context
+ DBFile string
+ Store dbCommon.Store
+ StoreOrgs map[string]params.Organization
+ Providers map[string]common.Provider
+ Credentials map[string]params.ForgeCredentials
+ CreateOrgParams params.CreateOrgParams
+ CreatePoolParams params.CreatePoolParams
+ CreateInstanceParams params.CreateInstanceParams
+ UpdateRepoParams params.UpdateEntityParams
+ UpdatePoolParams params.UpdatePoolParams
+ ErrMock error
+ ProviderMock *runnerCommonMocks.Provider
+ PoolMgrMock *runnerCommonMocks.PoolManager
+ PoolMgrCtrlMock *runnerMocks.PoolManagerController
}
type OrgTestSuite struct {
suite.Suite
Fixtures *OrgTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ giteaTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *OrgTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.giteaTestCreds = garmTesting.CreateTestGiteaCredentials(adminCtx, "gitea-creds", db, s.T(), s.giteaEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create some organization objects in the database, for testing purposes
orgs := map[string]params.Organization{}
for i := 1; i <= 3; i++ {
@@ -76,8 +86,9 @@ func (s *OrgTestSuite) SetupTest() {
org, err := db.CreateOrganization(
adminCtx,
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create database object (test-org-%v)", i))
@@ -97,16 +108,13 @@ func (s *OrgTestSuite) SetupTest() {
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateOrgParams: params.CreateOrgParams{
Name: "test-org-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-org-webhook-secret",
},
CreatePoolParams: params.CreatePoolParams{
@@ -117,15 +125,15 @@ func (s *OrgTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +142,6 @@ func (s *OrgTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +152,6 @@ func (s *OrgTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -168,7 +172,21 @@ func (s *OrgTestSuite) TestCreateOrganization() {
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.CreateOrgParams.Name, org.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateOrgParams.CredentialsName].Name, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateOrgParams.CredentialsName].Name, org.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, org.PoolBalancerType)
+}
+
+func (s *OrgTestSuite) TestCreateOrganizationPoolBalancerTypePack() {
+ s.Fixtures.CreateOrgParams.PoolBalancerType = params.PoolBalancerTypePack
+ s.Fixtures.PoolMgrMock.On("Start").Return(nil)
+ s.Fixtures.PoolMgrCtrlMock.On("CreateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+
+ org, err := s.Runner.CreateOrganization(s.Fixtures.AdminContext, s.Fixtures.CreateOrgParams)
+
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
}
func (s *OrgTestSuite) TestCreateOrganizationErrUnauthorized() {
@@ -184,7 +202,7 @@ func (s *OrgTestSuite) TestCreateOrganizationEmptyParams() {
}
func (s *OrgTestSuite) TestCreateOrganizationMissingCredentials() {
- s.Fixtures.CreateOrgParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateOrgParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateOrganization(s.Fixtures.AdminContext, s.Fixtures.CreateOrgParams)
@@ -206,7 +224,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestCreateOrganizationStartPoolMgrFailed() {
@@ -218,20 +236,80 @@ func (s *OrgTestSuite) TestCreateOrganizationStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestListOrganizations() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- orgs, err := s.Runner.ListOrganizations(s.Fixtures.AdminContext)
+ orgs, err := s.Runner.ListOrganizations(s.Fixtures.AdminContext, params.OrganizationFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreOrgs), orgs)
}
+func (s *OrgTestSuite) TestListOrganizationsWithFilter() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ org, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org",
+ s.testCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ org2, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org",
+ s.giteaTestCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ org3, err := s.Fixtures.Store.CreateOrganization(
+ s.Fixtures.AdminContext,
+ "test-org2",
+ s.giteaTestCreds,
+ "super-secret",
+ params.PoolBalancerTypeRoundRobin)
+ s.Require().NoError(err)
+
+ orgs, err := s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org, org2}, orgs)
+
+ orgs, err = s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org",
+ Endpoint: s.giteaEndpoint.Name,
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org2}, orgs)
+
+ orgs, err = s.Runner.ListOrganizations(
+ s.Fixtures.AdminContext,
+ params.OrganizationFilter{
+ Name: "test-org2",
+ },
+ )
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Organization{org3}, orgs)
+}
+
func (s *OrgTestSuite) TestListOrganizationsErrUnauthorized() {
- _, err := s.Runner.ListOrganizations(context.Background())
+ _, err := s.Runner.ListOrganizations(context.Background(), params.OrganizationFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -254,28 +332,32 @@ func (s *OrgTestSuite) TestGetOrganizationByIDErrUnauthorized() {
func (s *OrgTestSuite) TestDeleteOrganization() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(nil)
- err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID)
+ err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetOrganizationByID(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-3"].ID)
- s.Require().Equal("fetching org: not found", err.Error())
+ s.Require().Equal("error fetching org: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrganizationErrUnauthorized() {
- err := s.Runner.DeleteOrganization(context.Background(), "dummy-org-id")
+ err := s.Runner.DeleteOrganization(context.Background(), "dummy-org-id", true)
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *OrgTestSuite) TestDeleteOrganizationPoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store organizations pool: %v", err))
}
- err = s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID)
+ err = s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, true)
s.Require().Equal(runnerErrors.NewBadRequestError("org has pools defined (%s)", pool.ID), err)
}
@@ -283,25 +365,40 @@ func (s *OrgTestSuite) TestDeleteOrganizationPoolDefinedFailed() {
func (s *OrgTestSuite) TestDeleteOrganizationPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.ErrMock)
- err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID)
+ err := s.Runner.DeleteOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganization() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
org, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, org.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, org.WebhookSecret)
}
+func (s *OrgTestSuite) TestUpdateOrganizationBalancingType() {
+ s.Fixtures.UpdateRepoParams.PoolBalancerType = params.PoolBalancerTypePack
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ param := s.Fixtures.UpdateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ org, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, param)
+
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(params.PoolBalancerTypePack, org.PoolBalancerType)
+}
+
func (s *OrgTestSuite) TestUpdateOrganizationErrUnauthorized() {
_, err := s.Runner.UpdateOrganization(context.Background(), "dummy-org-id", s.Fixtures.UpdateRepoParams)
@@ -309,38 +406,33 @@ func (s *OrgTestSuite) TestUpdateOrganizationErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrganizationInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
-
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for org %s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreOrgs["test-org-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *OrgTestSuite) TestUpdateOrganizationPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- s.Fixtures.PoolMgrMock.On("RefreshState", s.Fixtures.UpdatePoolStateParams).Return(s.Fixtures.ErrMock)
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("updating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestUpdateOrganizationCreateOrgPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateOrgPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Organization"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateOrganization(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("failed to get org pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *OrgTestSuite) TestCreateOrgPool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -364,21 +456,8 @@ func (s *OrgTestSuite) TestCreateOrgPoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *OrgTestSuite) TestCreateOrgPoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, runnerErrors.ErrNotFound)
-
- _, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *OrgTestSuite) TestCreateOrgPoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateOrgPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -387,7 +466,11 @@ func (s *OrgTestSuite) TestCreateOrgPoolFetchPoolParamsFailed() {
}
func (s *OrgTestSuite) TestGetOrgPoolByID() {
- orgPool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ orgPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -405,7 +488,11 @@ func (s *OrgTestSuite) TestGetOrgPoolByIDErrUnauthorized() {
}
func (s *OrgTestSuite) TestDeleteOrgPool() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -414,8 +501,8 @@ func (s *OrgTestSuite) TestDeleteOrgPool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *OrgTestSuite) TestDeleteOrgPoolErrUnauthorized() {
@@ -425,7 +512,11 @@ func (s *OrgTestSuite) TestDeleteOrgPoolErrUnauthorized() {
}
func (s *OrgTestSuite) TestDeleteOrgPoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -440,10 +531,14 @@ func (s *OrgTestSuite) TestDeleteOrgPoolRunnersFailed() {
}
func (s *OrgTestSuite) TestListOrgPools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
orgPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-org-%v", i)
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -463,7 +558,11 @@ func (s *OrgTestSuite) TestListOrgPoolsErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrgPool() {
- orgPool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ orgPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -482,7 +581,11 @@ func (s *OrgTestSuite) TestUpdateOrgPoolErrUnauthorized() {
}
func (s *OrgTestSuite) TestUpdateOrgPoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %s", err))
}
@@ -497,7 +600,11 @@ func (s *OrgTestSuite) TestUpdateOrgPoolMinIdleGreaterThanMax() {
}
func (s *OrgTestSuite) TestListOrgInstances() {
- pool, err := s.Fixtures.Store.CreateOrganizationPool(s.Fixtures.AdminContext, s.Fixtures.StoreOrgs["test-org-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreOrgs["test-org-1"].ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create org pool: %v", err))
}
@@ -526,7 +633,7 @@ func (s *OrgTestSuite) TestListOrgInstancesErrUnauthorized() {
func (s *OrgTestSuite) TestFindOrgPoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name)
+ poolManager, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name, s.Fixtures.StoreOrgs["test-org-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -537,7 +644,7 @@ func (s *OrgTestSuite) TestFindOrgPoolManager() {
func (s *OrgTestSuite) TestFindOrgPoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetOrgPoolManager", mock.AnythingOfType("params.Organization")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name)
+ _, err := s.Runner.findOrgPoolManager(s.Fixtures.StoreOrgs["test-org-1"].Name, s.Fixtures.StoreOrgs["test-org-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/pool/cache.go b/runner/pool/cache.go
new file mode 100644
index 00000000..5a3a3c8c
--- /dev/null
+++ b/runner/pool/cache.go
@@ -0,0 +1,75 @@
+package pool
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+type poolCacheStore interface {
+ Next() (params.Pool, error)
+ Reset()
+ Len() int
+}
+
+type poolRoundRobin struct {
+ pools []params.Pool
+ next uint32
+}
+
+func (p *poolRoundRobin) Next() (params.Pool, error) {
+ if len(p.pools) == 0 {
+ return params.Pool{}, runnerErrors.ErrNoPoolsAvailable
+ }
+
+ n := atomic.AddUint32(&p.next, 1)
+ return p.pools[(int(n)-1)%len(p.pools)], nil
+}
+
+func (p *poolRoundRobin) Len() int {
+ return len(p.pools)
+}
+
+func (p *poolRoundRobin) Reset() {
+ atomic.StoreUint32(&p.next, 0)
+}
+
+type poolsForTags struct {
+ pools sync.Map
+ poolCacheType params.PoolBalancerType
+}
+
+func (p *poolsForTags) Get(tags []string) (poolCacheStore, bool) {
+ sort.Strings(tags)
+ key := strings.Join(tags, "^")
+
+ v, ok := p.pools.Load(key)
+ if !ok {
+ return nil, false
+ }
+ poolCache := v.(*poolRoundRobin)
+ if p.poolCacheType == params.PoolBalancerTypePack {
+ // When we service a list of jobs, we want to try each pool in turn
+ // for each job. Pools are sorted by priority so we always start from the
+ // highest priority pool and move on to the next if the first one is full.
+ poolCache.Reset()
+ }
+ return poolCache, true
+}
+
+func (p *poolsForTags) Add(tags []string, pools []params.Pool) poolCacheStore {
+ sort.Slice(pools, func(i, j int) bool {
+ return pools[i].Priority > pools[j].Priority
+ })
+
+ sort.Strings(tags)
+ key := strings.Join(tags, "^")
+
+ poolRR := &poolRoundRobin{pools: pools}
+ v, _ := p.pools.LoadOrStore(key, poolRR)
+ return v.(*poolRoundRobin)
+}
diff --git a/runner/pool/common.go b/runner/pool/common.go
new file mode 100644
index 00000000..a41e034d
--- /dev/null
+++ b/runner/pool/common.go
@@ -0,0 +1,28 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+type RunnerLabels struct {
+ ID int64 `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Type string `json:"type,omitempty"`
+}
+
+type forgeRunner struct {
+ ID int64 `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Status string `json:"status,omitempty"`
+ Labels []RunnerLabels `json:"labels,omitempty"`
+}
diff --git a/runner/pool/enterprise.go b/runner/pool/enterprise.go
deleted file mode 100644
index 9e2a258c..00000000
--- a/runner/pool/enterprise.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v48/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &enterprise{}
-
-func NewEnterprisePoolManager(ctx context.Context, cfg params.Enterprise, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, ghEnterpriseClient, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- helper := &enterprise{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- ghcEnterpriseCli: ghEnterpriseClient,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- done: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- }
- return repo, nil
-}
-
-type enterprise struct {
- cfg params.Enterprise
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- ghcEnterpriseCli common.GithubEnterpriseClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *enterprise) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Repository.Owner.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *enterprise) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
-
- ghc, ghcEnterprise, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- r.ghcEnterpriseCli = ghcEnterprise
- return nil
-}
-
-func (r *enterprise) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *enterprise) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcEnterpriseCli.ListRunners(r.ctx, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
- return allRunners, nil
-}
-
-func (r *enterprise) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcEnterpriseCli.ListRunnerApplicationDownloads(r.ctx, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *enterprise) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListEnterpriseInstances(r.ctx, r.id)
-}
-
-func (r *enterprise) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcEnterpriseCli.RemoveRunner(r.ctx, r.cfg.Name, runnerID)
-}
-
-func (r *enterprise) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListEnterprisePools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *enterprise) GithubURL() string {
- return fmt.Sprintf("%s/enterprises/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Name)
-}
-
-func (r *enterprise) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *enterprise) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcEnterpriseCli.CreateRegistrationToken(r.ctx, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching registration token")
- }
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *enterprise) String() string {
- return r.cfg.Name
-}
-
-func (r *enterprise) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *enterprise) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *enterprise) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *enterprise) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindEnterprisePoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *enterprise) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetEnterprisePool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *enterprise) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Enterprise.Slug, r.cfg.Name) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *enterprise) ID() string {
- return r.id
-}
diff --git a/runner/pool/interfaces.go b/runner/pool/interfaces.go
deleted file mode 100644
index e96d5d88..00000000
--- a/runner/pool/interfaces.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "github.com/cloudbase/garm/params"
-
- "github.com/google/go-github/v48/github"
-)
-
-type poolHelper interface {
- GetGithubToken() string
- GetGithubRunners() ([]*github.Runner, error)
- GetGithubRegistrationToken() (string, error)
- GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error)
- RemoveGithubRunner(runnerID int64) (*github.Response, error)
- FetchTools() ([]*github.RunnerApplicationDownload, error)
-
- FetchDbInstances() ([]params.Instance, error)
- ListPools() ([]params.Pool, error)
- GithubURL() string
- JwtToken() string
- String() string
- GetCallbackURL() string
- GetMetadataURL() string
- FindPoolByTags(labels []string) (params.Pool, error)
- GetPoolByID(poolID string) (params.Pool, error)
- ValidateOwner(job params.WorkflowJob) error
- UpdateState(param params.UpdatePoolStateParams) error
- WebhookSecret() string
- ID() string
-}
diff --git a/runner/pool/organization.go b/runner/pool/organization.go
deleted file mode 100644
index 36845cd8..00000000
--- a/runner/pool/organization.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v48/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &organization{}
-
-func NewOrganizationPoolManager(ctx context.Context, cfg params.Organization, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, _, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- helper := &organization{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- done: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- }
- return repo, nil
-}
-
-type organization struct {
- cfg params.Organization
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *organization) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Organization.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *organization) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
-
- ghc, _, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- return nil
-}
-
-func (r *organization) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *organization) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcli.ListOrganizationRunners(r.ctx, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
-
- return allRunners, nil
-}
-
-func (r *organization) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcli.ListOrganizationRunnerApplicationDownloads(r.ctx, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching tools")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *organization) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListOrgInstances(r.ctx, r.id)
-}
-
-func (r *organization) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcli.RemoveOrganizationRunner(r.ctx, r.cfg.Name, runnerID)
-}
-
-func (r *organization) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListOrgPools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *organization) GithubURL() string {
- return fmt.Sprintf("%s/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Name)
-}
-
-func (r *organization) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *organization) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcli.CreateOrganizationRegistrationToken(r.ctx, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching token")
- }
-
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *organization) String() string {
- return r.cfg.Name
-}
-
-func (r *organization) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *organization) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *organization) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *organization) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindOrganizationPoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *organization) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetOrganizationPool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *organization) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Organization.Login, r.cfg.Name) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *organization) ID() string {
- return r.id
-}
diff --git a/runner/pool/pool.go b/runner/pool/pool.go
index e46bd4f0..eecb500a 100644
--- a/runner/pool/pool.go
+++ b/runner/pool/pool.go
@@ -16,210 +16,346 @@ package pool
import (
"context"
+ "crypto/rand"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"math"
+ "math/big"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
+ "github.com/google/go-github/v72/github"
+ "github.com/google/uuid"
+ "golang.org/x/sync/errgroup"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/cache"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/locking"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
- providerCommon "github.com/cloudbase/garm/runner/providers/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v48/github"
- "github.com/pkg/errors"
- "golang.org/x/sync/errgroup"
+ garmUtil "github.com/cloudbase/garm/util"
+ ghClient "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
)
var (
- poolIDLabelprefix = "runner-pool-id:"
- controllerLabelPrefix = "runner-controller-id:"
+ poolIDLabelprefix = "runner-pool-id"
+ controllerLabelPrefix = "runner-controller-id"
+ // We tag runners that have been spawned as a result of a queued job with the job ID
+ // that spawned them. There is no way to guarantee that the runner spawned in response to a particular
+ // job, will be picked up by that job. We mark them so as in the very likely event that the runner
+ // has picked up a different job, we can clear the lock on the job that spawned it.
+ // The job it picked up would already be transitioned to in_progress so it will be ignored by the
+ // consume loop.
+ jobLabelPrefix = "in_response_to_job"
)
const (
// maxCreateAttempts is the number of times we will attempt to create an instance
// before we give up.
+ //
+ // nolint:golangci-lint,godox
// TODO: make this configurable(?)
maxCreateAttempts = 5
)
+func NewEntityPoolManager(ctx context.Context, entity params.ForgeEntity, instanceTokenGetter auth.InstanceTokenGetter, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
+ ctx = garmUtil.WithSlogContext(
+ ctx,
+ slog.Any("pool_mgr", entity.String()),
+ slog.Any("endpoint", entity.Credentials.Endpoint.Name),
+ slog.Any("pool_type", entity.EntityType),
+ )
+ ghc, err := ghClient.Client(ctx, entity)
+ if err != nil {
+ return nil, fmt.Errorf("error getting github client: %w", err)
+ }
+
+ if entity.WebhookSecret == "" {
+ return nil, fmt.Errorf("webhook secret is empty")
+ }
+
+ controllerInfo, err := store.ControllerInfo()
+ if err != nil {
+ return nil, fmt.Errorf("error getting controller info: %w", err)
+ }
+
+ consumerID := fmt.Sprintf("pool-manager-%s-%s", entity.String(), entity.Credentials.Endpoint.Name)
+ slog.InfoContext(ctx, "registering consumer", "consumer_id", consumerID)
+ consumer, err := watcher.RegisterConsumer(
+ ctx, consumerID,
+ composeWatcherFilters(entity),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error registering consumer: %w", err)
+ }
+
+ wg := &sync.WaitGroup{}
+ backoff, err := locking.NewInstanceDeleteBackoff(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error creating backoff: %w", err)
+ }
+
+ var scaleSetCli *scalesets.ScaleSetClient
+ if entity.Credentials.ForgeType == params.GithubEndpointType {
+ scaleSetCli, err = scalesets.NewClient(ghc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get scalesets client: %w", err)
+ }
+ }
+ repo := &basePoolManager{
+ ctx: ctx,
+ consumerID: consumerID,
+ entity: entity,
+ ghcli: ghc,
+ scaleSetClient: scaleSetCli,
+ controllerInfo: controllerInfo,
+ instanceTokenGetter: instanceTokenGetter,
+
+ store: store,
+ providers: providers,
+ quit: make(chan struct{}),
+ wg: wg,
+ backoff: backoff,
+ consumer: consumer,
+ }
+ return repo, nil
+}
+
type basePoolManager struct {
- ctx context.Context
- controllerID string
+ ctx context.Context
+ consumerID string
+ entity params.ForgeEntity
+ ghcli common.GithubClient
+ scaleSetClient *scalesets.ScaleSetClient
+ controllerInfo params.ControllerInfo
+ instanceTokenGetter auth.InstanceTokenGetter
+ consumer dbCommon.Consumer
store dbCommon.Store
providers map[string]common.Provider
- tools []*github.RunnerApplicationDownload
+ tools []commonParams.RunnerApplicationDownload
quit chan struct{}
- done chan struct{}
-
- helper poolHelper
- credsDetails params.GithubCredentials
managerIsRunning bool
managerErrorReason string
- mux sync.Mutex
+ mux sync.Mutex
+ wg *sync.WaitGroup
+ backoff locking.InstanceDeleteBackoff
}
-func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) (err error) {
- if err := r.helper.ValidateOwner(job); err != nil {
- return errors.Wrap(err, "validating owner")
+func (r *basePoolManager) getProviderBaseParams(pool params.Pool) common.ProviderBaseParams {
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ return common.ProviderBaseParams{
+ PoolInfo: pool,
+ ControllerInfo: r.controllerInfo,
+ }
+}
+
+func (r *basePoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
+ if err := r.ValidateOwner(job); err != nil {
+ slog.ErrorContext(r.ctx, "failed to validate owner", "error", err)
+ return fmt.Errorf("error validating owner: %w", err)
}
+ // we see events where the labels seem to be missing. We should ignore these
+ // as we can't know if we should handle them or not.
+ if len(job.WorkflowJob.Labels) == 0 {
+ slog.WarnContext(r.ctx, "job has no labels", "workflow_job", job.WorkflowJob.Name)
+ return nil
+ }
+
+ jobParams, err := r.paramsWorkflowJobToParamsJob(job)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to convert job to params", "error", err)
+ return fmt.Errorf("error converting job to params: %w", err)
+ }
+
+ var triggeredBy int64
defer func() {
- if err != nil && errors.Is(err, runnerErrors.ErrUnauthorized) {
- r.setPoolRunningState(false, fmt.Sprintf("failed to handle job: %q", err))
+ if jobParams.WorkflowJobID == 0 {
+ return
+ }
+ // we're updating the job in the database, regardless of whether it was successful or not.
+ // or if it was meant for this pool or not. Github will send the same job data to all hierarchies
+ // that have been configured to work with garm. Updating the job at all levels should yield the same
+ // outcome in the db.
+ _, err := r.store.GetJobByID(r.ctx, jobParams.WorkflowJobID)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to get job",
+ "job_id", jobParams.WorkflowJobID)
+ return
+ }
+ // This job is new to us. Check if we have a pool that can handle it.
+ potentialPools := cache.FindPoolsMatchingAllTags(r.entity.ID, jobParams.Labels)
+ if len(potentialPools) == 0 {
+ slog.WarnContext(
+ r.ctx, "no pools matching tags; not recording job",
+ "requested_tags", strings.Join(jobParams.Labels, ", "))
+ return
+ }
+ }
+
+ if _, jobErr := r.store.CreateOrUpdateJob(r.ctx, jobParams); jobErr != nil {
+ slog.With(slog.Any("error", jobErr)).ErrorContext(
+ r.ctx, "failed to update job", "job_id", jobParams.WorkflowJobID)
+ }
+
+ if triggeredBy != 0 && jobParams.WorkflowJobID != triggeredBy {
+ // The triggeredBy value is only set by the "in_progress" webhook. The runner that
+ // transitioned to in_progress was created as a result of a different queued job. If that job is
+ // still queued and we don't remove the lock, it will linger until the lock timeout is reached.
+ // That may take a long time, so we break the lock here and allow it to be scheduled again.
+ if err := r.store.BreakLockJobIsQueued(r.ctx, triggeredBy); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to break lock for job",
+ "job_id", triggeredBy)
+ }
}
}()
switch job.Action {
case "queued":
- // Create instance in database and set it to pending create.
- // If we already have an idle runner around, that runner will pick up the job
- // and trigger an "in_progress" update from github (see bellow), which in turn will set the
- // runner state of the instance to "active". The ensureMinIdleRunners() function will
- // exclude that runner from available runners and attempt to ensure
- // the needed number of runners.
- if err := r.acquireNewInstance(job); err != nil {
- log.Printf("failed to add instance: %s", err)
- }
+ // Record the job in the database. Queued jobs will be picked up by the consumeQueuedJobs() method
+ // when reconciling.
case "completed":
- // ignore the error here. A completed job may not have a runner name set
- // if it was never assigned to a runner, and was canceled.
- runnerInfo, err := r.getRunnerDetailsFromJob(job)
- if err != nil {
- if !errors.Is(err, runnerErrors.ErrUnauthorized) {
- // Unassigned jobs will have an empty runner_name.
- // We also need to ignore not found errors, as we may get a webhook regarding
- // a workflow that is handled by a runner at a different hierarchy level.
- return nil
- }
- return errors.Wrap(err, "updating runner")
+ // If job was not assigned to a runner, we can ignore it.
+ if jobParams.RunnerName == "" {
+ slog.InfoContext(
+ r.ctx, "job never got assigned to a runner, ignoring")
+ return nil
+ }
+
+ fromCache, ok := cache.GetInstanceCache(jobParams.RunnerName)
+ if !ok {
+ return nil
+ }
+
+ if _, ok := cache.GetEntityPool(r.entity.ID, fromCache.PoolID); !ok {
+ slog.DebugContext(r.ctx, "instance belongs to a pool not managed by this entity", "pool_id", fromCache.PoolID)
+ return nil
}
// update instance workload state.
- if err := r.setInstanceRunnerStatus(runnerInfo.Name, providerCommon.RunnerTerminated); err != nil {
+ if _, err := r.setInstanceRunnerStatus(jobParams.RunnerName, params.RunnerTerminated); err != nil {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- log.Printf("failed to update runner %s status", util.SanitizeLogEntry(runnerInfo.Name))
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
}
- log.Printf("marking instance %s as pending_delete", util.SanitizeLogEntry(runnerInfo.Name))
- if err := r.setInstanceStatus(runnerInfo.Name, providerCommon.InstancePendingDelete, nil); err != nil {
+ slog.DebugContext(
+ r.ctx, "marking instance as pending_delete",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ if _, err := r.setInstanceStatus(jobParams.RunnerName, commonParams.InstancePendingDelete, nil); err != nil {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- log.Printf("failed to update runner %s status", util.SanitizeLogEntry(runnerInfo.Name))
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
}
case "in_progress":
- // in_progress jobs must have a runner name/ID assigned. Sometimes github will send a hook without
- // a runner set. In such cases, we attemt to fetch it from the API.
- runnerInfo, err := r.getRunnerDetailsFromJob(job)
- if err != nil {
- if errors.Is(err, runnerErrors.ErrNotFound) {
- // This is most likely a runner we're not managing. If we define a repo from within an org
- // and also define that same org, we will get a hook from github from both the repo and the org
- // regarding the same workflow. We look for the runner in the database, and make sure it exists and is
- // part of a pool that this manager is responsible for. A not found error here will most likely mean
- // that we are not responsible for that runner, and we should ignore it.
- return nil
- }
- return errors.Wrap(err, "determining runner name")
+ fromCache, ok := cache.GetInstanceCache(jobParams.RunnerName)
+ if !ok {
+ slog.DebugContext(r.ctx, "instance not found in cache", "runner_name", jobParams.RunnerName)
+ return nil
}
+ pool, ok := cache.GetEntityPool(r.entity.ID, fromCache.PoolID)
+ if !ok {
+ slog.DebugContext(r.ctx, "instance belongs to a pool not managed by this entity", "pool_id", fromCache.PoolID)
+ return nil
+ }
// update instance workload state.
- if err := r.setInstanceRunnerStatus(runnerInfo.Name, providerCommon.RunnerActive); err != nil {
+ instance, err := r.setInstanceRunnerStatus(jobParams.RunnerName, params.RunnerActive)
+ if err != nil {
if errors.Is(err, runnerErrors.ErrNotFound) {
return nil
}
- log.Printf("failed to update runner %s status", util.SanitizeLogEntry(runnerInfo.Name))
- return errors.Wrap(err, "updating runner")
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", util.SanitizeLogEntry(jobParams.RunnerName))
+ return fmt.Errorf("error updating runner: %w", err)
+ }
+ // Set triggeredBy here so we break the lock on any potential queued job.
+ triggeredBy = jobIDFromLabels(instance.AditionalLabels)
+
+ // A runner has picked up the job, and is now running it. It may need to be replaced if the pool has
+ // a minimum number of idle runners configured.
+ if err := r.ensureIdleRunnersForOnePool(pool); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error ensuring idle runners for pool",
+ "pool_id", pool.ID)
}
}
return nil
}
-func (r *basePoolManager) loop() {
- scaleDownTimer := time.NewTicker(common.PoolScaleDownInterval)
- consolidateTimer := time.NewTicker(common.PoolConsilitationInterval)
- reapTimer := time.NewTicker(common.PoolReapTimeoutInterval)
- toolUpdateTimer := time.NewTicker(common.PoolToolUpdateInterval)
- defer func() {
- log.Printf("%s loop exited", r.helper.String())
- scaleDownTimer.Stop()
- consolidateTimer.Stop()
- reapTimer.Stop()
- toolUpdateTimer.Stop()
- close(r.done)
- }()
- log.Printf("starting loop for %s", r.helper.String())
+func jobIDFromLabels(labels []string) int64 {
+ for _, lbl := range labels {
+ if strings.HasPrefix(lbl, jobLabelPrefix) {
+ trimLength := min(len(jobLabelPrefix)+1, len(lbl))
+ jobID, err := strconv.ParseInt(lbl[trimLength:], 10, 64)
+ if err != nil {
+ return 0
+ }
+ return jobID
+ }
+ }
+ return 0
+}
+
+func (r *basePoolManager) startLoopForFunction(f func() error, interval time.Duration, name string, alwaysRun bool) {
+ slog.InfoContext(
+ r.ctx, "starting loop for entity",
+ "loop_name", name)
+ ticker := time.NewTicker(interval)
+ r.wg.Add(1)
+
+ defer func() {
+ slog.InfoContext(
+ r.ctx, "pool loop exited",
+ "loop_name", name)
+ ticker.Stop()
+ r.wg.Done()
+ }()
- // Consolidate runners on loop start. Provider runners must match runners
- // in github and DB. When a Workflow job is received, we will first create/update
- // an entity in the database, before sending the request to the provider to create/delete
- // an instance. If a "queued" job is received, we create an entity in the db with
- // a state of "pending_create". Once that instance is up and calls home, it is marked
- // as "active". If a "completed" job is received from github, we mark the instance
- // as "pending_delete". Once the provider deletes the instance, we mark it as "deleted"
- // in the database.
- // We also ensure we have runners created based on pool characteristics. This is where
- // we spin up "MinWorkers" for each runner type.
for {
- switch r.managerIsRunning {
+ shouldRun := r.managerIsRunning
+ if alwaysRun {
+ shouldRun = true
+ }
+ switch shouldRun {
case true:
select {
- case <-reapTimer.C:
- runners, err := r.helper.GetGithubRunners()
- if err != nil {
- failureReason := fmt.Sprintf("error fetching github runners for %s: %s", r.helper.String(), err)
- r.setPoolRunningState(false, failureReason)
- log.Print(failureReason)
+ case <-ticker.C:
+ if err := f(); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error in loop",
+ "loop_name", name)
if errors.Is(err, runnerErrors.ErrUnauthorized) {
- break
- }
- continue
- }
- if err := r.reapTimedOutRunners(runners); err != nil {
- log.Printf("failed to reap timed out runners: %q", err)
- }
-
- if err := r.runnerCleanup(); err != nil {
- failureReason := fmt.Sprintf("failed to clean runners for %s: %q", r.helper.String(), err)
- log.Print(failureReason)
- if errors.Is(err, runnerErrors.ErrUnauthorized) {
- r.setPoolRunningState(false, failureReason)
+ r.SetPoolRunningState(false, err.Error())
}
}
- case <-consolidateTimer.C:
- // consolidate.
- r.consolidate()
- case <-scaleDownTimer.C:
- r.scaleDown()
- case <-toolUpdateTimer.C:
- // Update tools cache.
- tools, err := r.helper.FetchTools()
- if err != nil {
- failureReason := fmt.Sprintf("failed to update tools for repo %s: %s", r.helper.String(), err)
- r.setPoolRunningState(false, failureReason)
- log.Print(failureReason)
- if errors.Is(err, runnerErrors.ErrUnauthorized) {
- break
- }
- continue
- }
- r.mux.Lock()
- r.tools = tools
- r.mux.Unlock()
case <-r.ctx.Done():
// daemon is shutting down.
return
@@ -236,68 +372,28 @@ func (r *basePoolManager) loop() {
// this worker was stopped.
return
default:
- log.Printf("attempting to start pool manager for %s", r.helper.String())
- tools, err := r.helper.FetchTools()
- var failureReason string
- if err != nil {
- failureReason = fmt.Sprintf("failed to fetch tools from github for %s: %q", r.helper.String(), err)
- r.setPoolRunningState(false, failureReason)
- log.Print(failureReason)
- if errors.Is(err, runnerErrors.ErrUnauthorized) {
- r.waitForTimeoutOrCanceled(common.UnauthorizedBackoffTimer)
- } else {
- r.waitForTimeoutOrCanceled(60 * time.Second)
- }
- continue
- }
- r.mux.Lock()
- r.tools = tools
- r.mux.Unlock()
-
- if err := r.runnerCleanup(); err != nil {
- failureReason = fmt.Sprintf("failed to clean runners for %s: %q", r.helper.String(), err)
- log.Print(failureReason)
- if errors.Is(err, runnerErrors.ErrUnauthorized) {
- r.setPoolRunningState(false, failureReason)
- r.waitForTimeoutOrCanceled(common.UnauthorizedBackoffTimer)
- }
- continue
- }
- r.setPoolRunningState(true, "")
+ r.waitForTimeoutOrCancelled(common.BackoffTimer)
}
}
}
}
-func controllerIDFromLabels(labels []string) string {
- for _, lbl := range labels {
- if strings.HasPrefix(lbl, controllerLabelPrefix) {
- return lbl[len(controllerLabelPrefix):]
- }
- }
- return ""
-}
-
-func labelsFromRunner(runner *github.Runner) []string {
- if runner == nil || runner.Labels == nil {
- return []string{}
+func (r *basePoolManager) updateTools() error {
+ tools, err := cache.GetGithubToolsCache(r.entity.ID)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update tools for entity", "entity", r.entity.String())
+ r.SetPoolRunningState(false, err.Error())
+ return fmt.Errorf("failed to update tools for entity %s: %w", r.entity.String(), err)
}
- var labels []string
- for _, val := range runner.Labels {
- if val == nil {
- continue
- }
- labels = append(labels, val.GetName())
- }
- return labels
-}
+ r.mux.Lock()
+ r.tools = tools
+ r.mux.Unlock()
-// isManagedRunner returns true if labels indicate the runner belongs to a pool
-// this manager is responsible for.
-func (r *basePoolManager) isManagedRunner(labels []string) bool {
- runnerControllerID := controllerIDFromLabels(labels)
- return runnerControllerID == r.controllerID
+ slog.DebugContext(r.ctx, "successfully updated tools")
+ r.SetPoolRunningState(true, "")
+ return nil
}
// cleanupOrphanedProviderRunners compares runners in github with local runners and removes
@@ -307,49 +403,77 @@ func (r *basePoolManager) isManagedRunner(labels []string) bool {
// happens, github will remove the ephemeral worker and send a webhook our way.
// If we were offline and did not process the webhook, the instance will linger.
// We need to remove it from the provider and database.
-func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []*github.Runner) error {
- dbInstances, err := r.helper.FetchDbInstances()
+func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []forgeRunner) error {
+ dbInstances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
- return errors.Wrap(err, "fetching instances from db")
+ return fmt.Errorf("error fetching instances from db: %w", err)
}
runnerNames := map[string]bool{}
for _, run := range runners {
- if !r.isManagedRunner(labelsFromRunner(run)) {
- log.Printf("runner %s is not managed by a pool belonging to %s", *run.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(run), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", run.Name)
continue
}
- runnerNames[*run.Name] = true
+ runnerNames[run.Name] = true
}
for _, instance := range dbInstances {
- switch providerCommon.InstanceStatus(instance.Status) {
- case providerCommon.InstancePendingCreate,
- providerCommon.InstancePendingDelete:
+ if instance.ScaleSetID != 0 {
+ // ignore scale set instances.
+ continue
+ }
+
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+ defer locking.Unlock(instance.Name, false)
+
+ switch instance.Status {
+ case commonParams.InstancePendingCreate,
+ commonParams.InstancePendingDelete, commonParams.InstancePendingForceDelete:
// this instance is in the process of being created or is awaiting deletion.
// Instances in pending_create did not get a chance to register themselves in,
// github so we let them be for now.
continue
}
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
+ if err != nil {
+ return fmt.Errorf("error fetching instance pool info: %w", err)
+ }
switch instance.RunnerStatus {
- case providerCommon.RunnerPending, providerCommon.RunnerInstalling:
- // runner is still installing. We give it a chance to finish.
- log.Printf("runner %s is still installing, give it a chance to finish", instance.Name)
- continue
+ case params.RunnerPending, params.RunnerInstalling:
+ if time.Since(instance.UpdatedAt).Minutes() < float64(pool.RunnerTimeout()) {
+ // runner is still installing. We give it a chance to finish.
+ slog.DebugContext(
+ r.ctx, "runner is still installing, give it a chance to finish",
+ "runner_name", instance.Name)
+ continue
+ }
}
if time.Since(instance.UpdatedAt).Minutes() < 5 {
// instance was updated recently. We give it a chance to register itself in github.
- log.Printf("instance %s was updated recently, skipping check", instance.Name)
+ slog.DebugContext(
+ r.ctx, "instance was updated recently, skipping check",
+ "runner_name", instance.Name)
continue
}
if ok := runnerNames[instance.Name]; !ok {
// Set pending_delete on DB field. Allow consolidate() to remove it.
- if err := r.setInstanceStatus(instance.Name, providerCommon.InstancePendingDelete, nil); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
- return errors.Wrap(err, "updating runner")
+ if _, err := r.setInstanceStatus(instance.Name, commonParams.InstancePendingDelete, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner",
+ "runner_name", instance.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
}
}
@@ -359,313 +483,351 @@ func (r *basePoolManager) cleanupOrphanedProviderRunners(runners []*github.Runne
// reapTimedOutRunners will mark as pending_delete any runner that has a status
// of "running" in the provider, but that has not registered with Github, and has
// received no new updates in the configured timeout interval.
-func (r *basePoolManager) reapTimedOutRunners(runners []*github.Runner) error {
- dbInstances, err := r.helper.FetchDbInstances()
+func (r *basePoolManager) reapTimedOutRunners(runners []forgeRunner) error {
+ dbInstances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
- return errors.Wrap(err, "fetching instances from db")
+ return fmt.Errorf("error fetching instances from db: %w", err)
}
- runnersByName := map[string]*github.Runner{}
+ runnersByName := map[string]forgeRunner{}
for _, run := range runners {
- if !r.isManagedRunner(labelsFromRunner(run)) {
- log.Printf("runner %s is not managed by a pool belonging to %s", *run.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(run), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", run.Name)
continue
}
- runnersByName[*run.Name] = run
+ runnersByName[run.Name] = run
}
for _, instance := range dbInstances {
- pool, err := r.store.GetPoolByID(r.ctx, instance.PoolID)
+ if instance.ScaleSetID != 0 {
+ // ignore scale set instances.
+ continue
+ }
+
+ slog.DebugContext(
+ r.ctx, "attempting to lock instance",
+ "runner_name", instance.Name)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+ defer locking.Unlock(instance.Name, false)
+
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching instance pool info")
+ return fmt.Errorf("error fetching instance pool info: %w", err)
}
if time.Since(instance.UpdatedAt).Minutes() < float64(pool.RunnerTimeout()) {
continue
}
- // There are 2 cases (currently) where we consider a runner as timed out:
+ // There are 3 cases (currently) where we consider a runner as timed out:
// * The runner never joined github within the pool timeout
// * The runner managed to join github, but the setup process failed later and the runner
// never started on the instance.
- //
- // There are several steps in the user data that sets up the runner:
- // * Download and unarchive the runner from github (or used the cached version)
- // * Configure runner (connects to github). At this point the runner is seen as offline.
- // * Install the service
- // * Set SELinux context (if SELinux is enabled)
- // * Start the service (if successful, the runner will transition to "online")
- // * Get the runner ID
- //
- // If we fail getting the runner ID after it's started, garm will set the runner status to "failed",
- // even though, technically the runner is online and fully functional. This is why we check here for
- // both the runner status as reported by GitHub and the runner status as reported by the provider.
- // If the runner is "offline" and marked as "failed", it should be safe to reap it.
- if runner, ok := runnersByName[instance.Name]; !ok || (runner.GetStatus() == "offline" && instance.RunnerStatus == providerCommon.RunnerFailed) {
- log.Printf("reaping timed-out/failed runner %s", instance.Name)
- if err := r.ForceDeleteRunner(instance); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
- return errors.Wrap(err, "updating runner")
+ // * A JIT config was created, but the runner never joined github.
+ if runner, ok := runnersByName[instance.Name]; !ok || runner.Status == "offline" {
+ slog.InfoContext(
+ r.ctx, "reaping timed-out/failed runner",
+ "runner_name", instance.Name)
+ if err := r.DeleteRunner(instance, false, false); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
}
}
return nil
}
-func instanceInList(instanceName string, instances []params.Instance) (params.Instance, bool) {
- for _, val := range instances {
- if val.Name == instanceName {
- return val, true
- }
- }
- return params.Instance{}, false
-}
-
// cleanupOrphanedGithubRunners will forcefully remove any github runners that appear
// as offline and for which we no longer have a local instance.
// This may happen if someone manually deletes the instance in the provider. We need to
// first remove the instance from github, and then from our database.
-func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []*github.Runner) error {
- poolInstanceCache := map[string][]params.Instance{}
+func (r *basePoolManager) cleanupOrphanedGithubRunners(runners []forgeRunner) error {
+ poolInstanceCache := map[string][]commonParams.ProviderInstance{}
g, ctx := errgroup.WithContext(r.ctx)
for _, runner := range runners {
- if !r.isManagedRunner(labelsFromRunner(runner)) {
- log.Printf("runner %s is not managed by a pool belonging to %s", *runner.Name, r.helper.String())
+ if !isManagedRunner(labelsFromRunner(runner), r.controllerInfo.ControllerID.String()) {
+ slog.DebugContext(
+ r.ctx, "runner is not managed by a pool we manage",
+ "runner_name", runner.Name)
continue
}
- status := runner.GetStatus()
+ status := runner.Status
if status != "offline" {
// Runner is online. Ignore it.
continue
}
- dbInstance, err := r.store.GetInstanceByName(r.ctx, *runner.Name)
+ dbInstance, err := r.store.GetInstance(r.ctx, runner.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return errors.Wrap(err, "fetching instance from DB")
+ return fmt.Errorf("error fetching instance from DB: %w", err)
}
// We no longer have a DB entry for this instance, and the runner appears offline in github.
// Previous forceful removal may have failed?
- log.Printf("Runner %s has no database entry in garm, removing from github", *runner.Name)
- resp, err := r.helper.RemoveGithubRunner(*runner.ID)
- if err != nil {
+ slog.InfoContext(
+ r.ctx, "Runner has no database entry in garm, removing from github",
+ "runner_name", runner.Name)
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.ID); err != nil {
// Removed in the meantime?
- if resp != nil && resp.StatusCode == http.StatusNotFound {
+ if errors.Is(err, runnerErrors.ErrNotFound) {
continue
}
- return errors.Wrap(err, "removing runner")
+ return fmt.Errorf("error removing runner: %w", err)
}
continue
}
+ if dbInstance.ScaleSetID != 0 {
+ // ignore scale set instances.
+ continue
+ }
- switch providerCommon.InstanceStatus(dbInstance.Status) {
- case providerCommon.InstancePendingDelete, providerCommon.InstanceDeleting:
+ switch dbInstance.Status {
+ case commonParams.InstancePendingDelete, commonParams.InstanceDeleting:
// already marked for deletion or is in the process of being deleted.
// Let consolidate take care of it.
continue
+ case commonParams.InstancePendingCreate, commonParams.InstanceCreating:
+ // instance is still being created. We give it a chance to finish.
+ slog.DebugContext(
+ r.ctx, "instance is still being created, give it a chance to finish",
+ "runner_name", dbInstance.Name)
+ continue
+ case commonParams.InstanceRunning:
+ // this check is not strictly needed, but can help avoid unnecessary strain on the provider.
+ // At worst, we will have a runner that is offline in github for 5 minutes before we reap it.
+ if time.Since(dbInstance.UpdatedAt).Minutes() < 5 {
+ // instance was updated recently. We give it a chance to register itself in github.
+ slog.DebugContext(
+ r.ctx, "instance was updated recently, skipping check",
+ "runner_name", dbInstance.Name)
+ continue
+ }
}
- pool, err := r.helper.GetPoolByID(dbInstance.PoolID)
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, dbInstance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
// check if the provider still has the instance.
- provider, ok := r.providers[pool.ProviderName]
+ provider, ok := r.providers[dbInstance.ProviderName]
if !ok {
- return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
+ return fmt.Errorf("unknown provider %s for pool %s", dbInstance.ProviderName, dbInstance.PoolID)
}
- var poolInstances []params.Instance
- poolInstances, ok = poolInstanceCache[pool.ID]
+ var poolInstances []commonParams.ProviderInstance
+ poolInstances, ok = poolInstanceCache[dbInstance.PoolID]
if !ok {
- log.Printf("updating instances cache for pool %s", pool.ID)
- poolInstances, err = provider.ListInstances(r.ctx, pool.ID)
- if err != nil {
- return errors.Wrapf(err, "fetching instances for pool %s", pool.ID)
+ slog.DebugContext(
+ r.ctx, "updating instances cache for pool",
+ "pool_id", pool.ID)
+ listInstancesParams := common.ListInstancesParams{
+ ListInstancesV011: common.ListInstancesV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
}
- poolInstanceCache[pool.ID] = poolInstances
+ poolInstances, err = provider.ListInstances(r.ctx, pool.ID, listInstancesParams)
+ if err != nil {
+ return fmt.Errorf("error fetching instances for pool %s: %w", dbInstance.PoolID, err)
+ }
+ poolInstanceCache[dbInstance.PoolID] = poolInstances
}
+
+ lockAcquired := locking.TryLock(dbInstance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", dbInstance.Name)
+ continue
+ }
+
// See: https://golang.org/doc/faq#closures_and_goroutines
runner := runner
g.Go(func() error {
+ deleteMux := false
+ defer func() {
+ locking.Unlock(dbInstance.Name, deleteMux)
+ }()
providerInstance, ok := instanceInList(dbInstance.Name, poolInstances)
if !ok {
// The runner instance is no longer on the provider, and it appears offline in github.
// It should be safe to force remove it.
- log.Printf("Runner instance for %s is no longer on the provider, removing from github", dbInstance.Name)
- resp, err := r.helper.RemoveGithubRunner(*runner.ID)
- if err != nil {
+ slog.InfoContext(
+ r.ctx, "Runner instance is no longer on the provider, removing from github",
+ "runner_name", dbInstance.Name)
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.ID); err != nil {
// Removed in the meantime?
- if resp != nil && resp.StatusCode == http.StatusNotFound {
- log.Printf("runner dissapeared from github")
+ if errors.Is(err, runnerErrors.ErrNotFound) {
+ slog.DebugContext(
+ r.ctx, "runner disappeared from github",
+ "runner_name", dbInstance.Name)
} else {
- return errors.Wrap(err, "removing runner from github")
+ return fmt.Errorf("error removing runner from github: %w", err)
}
}
// Remove the database entry for the runner.
- log.Printf("Removing %s from database", dbInstance.Name)
+ slog.InfoContext(
+ r.ctx, "Removing from database",
+ "runner_name", dbInstance.Name)
if err := r.store.DeleteInstance(ctx, dbInstance.PoolID, dbInstance.Name); err != nil {
- return errors.Wrap(err, "removing runner from database")
+ return fmt.Errorf("error removing runner from database: %w", err)
}
+ deleteMux = true
return nil
}
- if providerInstance.Status == providerCommon.InstanceRunning {
+ if providerInstance.Status == commonParams.InstanceRunning {
// instance is running, but github reports runner as offline. Log the event.
// This scenario may require manual intervention.
// Perhaps it just came online and github did not yet change it's status?
- log.Printf("instance %s is online but github reports runner as offline", dbInstance.Name)
+ slog.WarnContext(
+ r.ctx, "instance is online but github reports runner as offline",
+ "runner_name", dbInstance.Name)
return nil
- } else {
- log.Printf("instance %s was found in stopped state; starting", dbInstance.Name)
- //start the instance
- if err := provider.Start(r.ctx, dbInstance.ProviderID); err != nil {
- return errors.Wrapf(err, "starting instance %s", dbInstance.ProviderID)
- }
+ }
+
+ slog.InfoContext(
+ r.ctx, "instance was found in stopped state; starting",
+ "runner_name", dbInstance.Name)
+
+ startParams := common.StartParams{
+ StartV011: common.StartV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.Start(r.ctx, dbInstance.ProviderID, startParams); err != nil {
+ return fmt.Errorf("error starting instance %s: %w", dbInstance.ProviderID, err)
}
return nil
})
}
- if err := g.Wait(); err != nil {
- return errors.Wrap(err, "removing orphaned github runners")
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("error removing orphaned github runners: %w", err)
}
return nil
}
-func (r *basePoolManager) fetchInstance(runnerName string) (params.Instance, error) {
- runner, err := r.store.GetInstanceByName(r.ctx, runnerName)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+func (r *basePoolManager) waitForErrorGroupOrContextCancelled(g *errgroup.Group) error {
+ if g == nil {
+ return nil
}
- _, err = r.helper.GetPoolByID(runner.PoolID)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching pool")
- }
+ done := make(chan error, 1)
+ go func() {
+ waitErr := g.Wait()
+ done <- waitErr
+ }()
- return runner, nil
+ select {
+ case err := <-done:
+ return err
+ case <-r.ctx.Done():
+ return r.ctx.Err()
+ }
}
-func (r *basePoolManager) setInstanceRunnerStatus(runnerName string, status providerCommon.RunnerStatus) error {
+func (r *basePoolManager) setInstanceRunnerStatus(runnerName string, status params.RunnerStatus) (params.Instance, error) {
updateParams := params.UpdateInstanceParams{
RunnerStatus: status,
}
-
- if err := r.updateInstance(runnerName, updateParams); err != nil {
- return errors.Wrap(err, "updating runner state")
- }
- return nil
-}
-
-func (r *basePoolManager) updateInstance(runnerName string, update params.UpdateInstanceParams) error {
- runner, err := r.fetchInstance(runnerName)
+ instance, err := r.store.UpdateInstance(r.ctx, runnerName, updateParams)
if err != nil {
- return errors.Wrap(err, "fetching instance")
+ return params.Instance{}, fmt.Errorf("error updating runner state: %w", err)
}
-
- if _, err := r.store.UpdateInstance(r.ctx, runner.ID, update); err != nil {
- return errors.Wrap(err, "updating runner state")
- }
- return nil
+ return instance, nil
}
-func (r *basePoolManager) setInstanceStatus(runnerName string, status providerCommon.InstanceStatus, providerFault []byte) error {
+func (r *basePoolManager) setInstanceStatus(runnerName string, status commonParams.InstanceStatus, providerFault []byte) (params.Instance, error) {
updateParams := params.UpdateInstanceParams{
Status: status,
ProviderFault: providerFault,
}
- if err := r.updateInstance(runnerName, updateParams); err != nil {
- return errors.Wrap(err, "updating runner state")
+ instance, err := r.store.UpdateInstance(r.ctx, runnerName, updateParams)
+ if err != nil {
+ return params.Instance{}, fmt.Errorf("error updating runner state: %w", err)
}
- return nil
+ return instance, nil
}
-func (r *basePoolManager) acquireNewInstance(job params.WorkflowJob) error {
- requestedLabels := job.WorkflowJob.Labels
- if len(requestedLabels) == 0 {
- // no labels were requested.
- return nil
- }
-
- pool, err := r.helper.FindPoolByTags(requestedLabels)
+func (r *basePoolManager) AddRunner(ctx context.Context, poolID string, aditionalLabels []string) (err error) {
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, poolID)
if err != nil {
- if errors.Is(err, runnerErrors.ErrNotFound) {
- log.Printf("failed to find an enabled pool with required labels: %s", strings.Join(requestedLabels, ", "))
- return nil
- }
- return errors.Wrap(err, "fetching suitable pool")
- }
- log.Printf("adding new runner with requested tags %s in pool %s", util.SanitizeLogEntry(strings.Join(job.WorkflowJob.Labels, ", ")), util.SanitizeLogEntry(pool.ID))
-
- if !pool.Enabled {
- log.Printf("selected pool (%s) is disabled", pool.ID)
- return nil
+ return fmt.Errorf("error fetching pool: %w", err)
}
- poolInstances, err := r.store.PoolInstanceCount(r.ctx, pool.ID)
- if err != nil {
- return errors.Wrap(err, "fetching instances")
- }
-
- if poolInstances >= int64(pool.MaxRunners) {
- log.Printf("max_runners (%d) reached for pool %s, skipping...", pool.MaxRunners, pool.ID)
- return nil
- }
-
- instances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
- if err != nil {
- return errors.Wrap(err, "fetching instances")
- }
-
- idleWorkers := 0
- for _, inst := range instances {
- if providerCommon.RunnerStatus(inst.RunnerStatus) == providerCommon.RunnerIdle &&
- providerCommon.InstanceStatus(inst.Status) == providerCommon.InstanceRunning {
- idleWorkers++
- }
- }
-
- // Skip creating a new runner if we have at least one idle runner and the minimum is already satisfied.
- // This should work even for pools that define a MinIdleRunner of 0.
- if int64(idleWorkers) > 0 && int64(idleWorkers) >= int64(pool.MinIdleRunners) {
- log.Printf("we have enough min_idle_runners (%d) for pool %s, skipping...", pool.MinIdleRunners, pool.ID)
- return nil
- }
-
- if err := r.AddRunner(r.ctx, pool.ID); err != nil {
- log.Printf("failed to add runner to pool %s", pool.ID)
- return errors.Wrap(err, "adding runner")
- }
- return nil
-}
-
-func (r *basePoolManager) AddRunner(ctx context.Context, poolID string) error {
- pool, err := r.helper.GetPoolByID(poolID)
- if err != nil {
- return errors.Wrap(err, "fetching pool")
+ provider, ok := r.providers[pool.ProviderName]
+ if !ok {
+ return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
}
name := fmt.Sprintf("%s-%s", pool.GetRunnerPrefix(), util.NewID())
+ labels := r.getLabelsForInstance(pool)
+
+ jitConfig := make(map[string]string)
+ var runner *github.Runner
+
+ if !provider.DisableJITConfig() && r.entity.Credentials.ForgeType != params.GiteaEndpointType {
+ jitConfig, runner, err = r.ghcli.GetEntityJITConfig(ctx, name, pool, labels)
+ if err != nil {
+ return fmt.Errorf("failed to generate JIT config: %w", err)
+ }
+ }
createParams := params.CreateInstanceParams{
Name: name,
- Status: providerCommon.InstancePendingCreate,
- RunnerStatus: providerCommon.RunnerPending,
+ Status: commonParams.InstancePendingCreate,
+ RunnerStatus: params.RunnerPending,
OSArch: pool.OSArch,
OSType: pool.OSType,
- CallbackURL: r.helper.GetCallbackURL(),
- MetadataURL: r.helper.GetMetadataURL(),
+ CallbackURL: r.controllerInfo.CallbackURL,
+ MetadataURL: r.controllerInfo.MetadataURL,
CreateAttempt: 1,
GitHubRunnerGroup: pool.GitHubRunnerGroup,
+ AditionalLabels: aditionalLabels,
+ JitConfiguration: jitConfig,
}
- _, err = r.store.CreateInstance(r.ctx, poolID, createParams)
- if err != nil {
- return errors.Wrap(err, "creating instance")
+ if runner != nil {
+ createParams.AgentID = runner.GetID()
}
+ instance, err := r.store.CreateInstance(r.ctx, poolID, createParams)
+ if err != nil {
+ return fmt.Errorf("error creating instance: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ if instance.ID != "" {
+ if err := r.DeleteRunner(instance, false, false); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to cleanup instance",
+ "runner_name", instance.Name)
+ }
+ }
+
+ if runner != nil {
+ runnerCleanupErr := r.ghcli.RemoveEntityRunner(r.ctx, runner.GetID())
+ if err != nil {
+ slog.With(slog.Any("error", runnerCleanupErr)).ErrorContext(
+ ctx, "failed to remove runner",
+ "gh_runner_id", runner.GetID())
+ }
+ }
+ }
+ }()
+
return nil
}
@@ -678,26 +840,39 @@ func (r *basePoolManager) Status() params.PoolManagerStatus {
}
}
-func (r *basePoolManager) waitForTimeoutOrCanceled(timeout time.Duration) {
- log.Printf("sleeping for %.2f minutes", timeout.Minutes())
+func (r *basePoolManager) waitForTimeoutOrCancelled(timeout time.Duration) {
+ slog.DebugContext(
+ r.ctx, fmt.Sprintf("sleeping for %.2f minutes", timeout.Minutes()))
+ timer := time.NewTimer(timeout)
+ defer timer.Stop()
select {
- case <-time.After(timeout):
+ case <-timer.C:
case <-r.ctx.Done():
case <-r.quit:
}
}
-func (r *basePoolManager) setPoolRunningState(isRunning bool, failureReason string) {
+func (r *basePoolManager) SetPoolRunningState(isRunning bool, failureReason string) {
r.mux.Lock()
r.managerErrorReason = failureReason
r.managerIsRunning = isRunning
r.mux.Unlock()
}
+func (r *basePoolManager) getLabelsForInstance(pool params.Pool) []string {
+ labels := []string{}
+ for _, tag := range pool.Tags {
+ labels = append(labels, tag.Name)
+ }
+ labels = append(labels, r.controllerLabel())
+ labels = append(labels, r.poolLabel(pool.ID))
+ return labels
+}
+
func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error {
- pool, err := r.helper.GetPoolByID(instance.PoolID)
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
provider, ok := r.providers[pool.ProviderName]
@@ -705,25 +880,19 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
}
- labels := []string{}
- for _, tag := range pool.Tags {
- labels = append(labels, tag.Name)
- }
- labels = append(labels, r.controllerLabel())
- labels = append(labels, r.poolLabel(pool.ID))
-
jwtValidity := pool.RunnerTimeout()
- entity := r.helper.String()
- jwtToken, err := auth.NewInstanceJWTToken(instance, r.helper.JwtToken(), entity, pool.PoolType(), jwtValidity)
+ jwtToken, err := r.instanceTokenGetter.NewInstanceJWTToken(instance, r.entity, pool.PoolType(), jwtValidity)
if err != nil {
- return errors.Wrap(err, "fetching instance jwt token")
+ return fmt.Errorf("error fetching instance jwt token: %w", err)
}
- bootstrapArgs := params.BootstrapInstance{
+ hasJITConfig := len(instance.JitConfiguration) > 0
+
+ bootstrapArgs := commonParams.BootstrapInstance{
Name: instance.Name,
Tools: r.tools,
- RepoURL: r.helper.GithubURL(),
+ RepoURL: r.entity.ForgeURL(),
MetadataURL: instance.MetadataURL,
CallbackURL: instance.CallbackURL,
InstanceToken: jwtToken,
@@ -732,31 +901,50 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
Flavor: pool.Flavor,
Image: pool.Image,
ExtraSpecs: pool.ExtraSpecs,
- Labels: labels,
PoolID: instance.PoolID,
- CACertBundle: r.credsDetails.CABundle,
+ CACertBundle: r.entity.Credentials.CABundle,
GitHubRunnerGroup: instance.GitHubRunnerGroup,
+ JitConfigEnabled: hasJITConfig,
+ }
+
+ if !hasJITConfig {
+ // We still need the labels here for situations where we don't have a JIT config generated.
+ // This can happen if GARM is used against an instance of GHES older than version 3.10.
+ // The labels field should be ignored by providers if JIT config is enabled.
+ bootstrapArgs.Labels = r.getLabelsForInstance(pool)
}
var instanceIDToDelete string
defer func() {
if instanceIDToDelete != "" {
- if err := provider.DeleteInstance(r.ctx, instanceIDToDelete); err != nil {
+ deleteInstanceParams := common.DeleteInstanceParams{
+ DeleteInstanceV011: common.DeleteInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.DeleteInstance(r.ctx, instanceIDToDelete, deleteInstanceParams); err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- log.Printf("failed to cleanup instance: %s", instanceIDToDelete)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to cleanup instance",
+ "provider_id", instanceIDToDelete)
}
}
}
}()
- providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs)
+ createInstanceParams := common.CreateInstanceParams{
+ CreateInstanceV011: common.CreateInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ providerInstance, err := provider.CreateInstance(r.ctx, bootstrapArgs, createInstanceParams)
if err != nil {
instanceIDToDelete = instance.Name
- return errors.Wrap(err, "creating instance")
+ return fmt.Errorf("error creating instance: %w", err)
}
- if providerInstance.Status == providerCommon.InstanceError {
+ if providerInstance.Status == commonParams.InstanceError {
instanceIDToDelete = instance.ProviderID
if instanceIDToDelete == "" {
instanceIDToDelete = instance.Name
@@ -764,70 +952,96 @@ func (r *basePoolManager) addInstanceToProvider(instance params.Instance) error
}
updateInstanceArgs := r.updateArgsFromProviderInstance(providerInstance)
- if _, err := r.store.UpdateInstance(r.ctx, instance.ID, updateInstanceArgs); err != nil {
- return errors.Wrap(err, "updating instance")
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateInstanceArgs); err != nil {
+ return fmt.Errorf("error updating instance: %w", err)
}
return nil
}
-func (r *basePoolManager) getRunnerDetailsFromJob(job params.WorkflowJob) (params.RunnerInfo, error) {
- runnerInfo := params.RunnerInfo{
- Name: job.WorkflowJob.RunnerName,
- Labels: job.WorkflowJob.Labels,
- }
-
- var err error
- if job.WorkflowJob.RunnerName == "" {
- // Runner name was not set in WorkflowJob by github. We can still attempt to
- // fetch the info we need, using the workflow run ID, from the API.
- log.Printf("runner name not found in workflow job, attempting to fetch from API")
- runnerInfo, err = r.helper.GetRunnerInfoFromWorkflow(job)
- if err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "fetching runner name from API")
- }
- }
-
- runnerDetails, err := r.store.GetInstanceByName(context.Background(), runnerInfo.Name)
+// paramsWorkflowJobToParamsJob returns a params.Job from a params.WorkflowJob, and aditionally determines
+// if the runner belongs to this pool or not. It will always return a valid params.Job, even if it errs out.
+// This allows us to still update the job in the database, even if we determined that it wasn't necessarily meant
+// for this pool.
+// If garm manages multiple hierarchies (repos, org, enterprise) which involve the same repo, we will get a hook
+// whenever a job involving our repo triggers a hook. So even if the job is picked up by a runner at the enterprise
+// level, the repo and org still get a hook.
+// We even get a hook if a particular job is picked up by a GitHub hosted runner. We don't know who will pick up the job
+// until the "in_progress" event is sent and we can see which runner picked it up.
+//
+// We save the details of that job at every level, because we want to at least update the status of the job. We make
+// decissions based on the status of saved jobs. A "queued" job will prompt garm to search for an appropriate pool
+// and spin up a runner there if no other idle runner exists to pick it up.
+func (r *basePoolManager) paramsWorkflowJobToParamsJob(job params.WorkflowJob) (params.Job, error) {
+ asUUID, err := uuid.Parse(r.ID())
if err != nil {
- log.Printf("could not find runner details for %s", util.SanitizeLogEntry(runnerInfo.Name))
- return params.RunnerInfo{}, errors.Wrap(err, "fetching runner details")
+ return params.Job{}, fmt.Errorf("error parsing pool ID as UUID: %w", err)
}
- if _, err := r.helper.GetPoolByID(runnerDetails.PoolID); err != nil {
- log.Printf("runner %s (pool ID: %s) does not belong to any pool we manage: %s", runnerDetails.Name, runnerDetails.PoolID, err)
- return params.RunnerInfo{}, errors.Wrap(err, "fetching pool for instance")
+ jobParams := params.Job{
+ WorkflowJobID: job.WorkflowJob.ID,
+ Action: job.Action,
+ RunID: job.WorkflowJob.RunID,
+ Status: job.WorkflowJob.Status,
+ Conclusion: job.WorkflowJob.Conclusion,
+ StartedAt: job.WorkflowJob.StartedAt,
+ CompletedAt: job.WorkflowJob.CompletedAt,
+ Name: job.WorkflowJob.Name,
+ GithubRunnerID: job.WorkflowJob.RunnerID,
+ RunnerName: job.WorkflowJob.RunnerName,
+ RunnerGroupID: job.WorkflowJob.RunnerGroupID,
+ RunnerGroupName: job.WorkflowJob.RunnerGroupName,
+ RepositoryName: job.Repository.Name,
+ RepositoryOwner: job.Repository.Owner.Login,
+ Labels: job.WorkflowJob.Labels,
}
- return runnerInfo, nil
+
+ switch r.entity.EntityType {
+ case params.ForgeEntityTypeEnterprise:
+ jobParams.EnterpriseID = &asUUID
+ case params.ForgeEntityTypeRepository:
+ jobParams.RepoID = &asUUID
+ case params.ForgeEntityTypeOrganization:
+ jobParams.OrgID = &asUUID
+ default:
+ return jobParams, fmt.Errorf("unknown pool type: %s", r.entity.EntityType)
+ }
+
+ return jobParams, nil
}
func (r *basePoolManager) poolLabel(poolID string) string {
- return fmt.Sprintf("%s%s", poolIDLabelprefix, poolID)
+ return fmt.Sprintf("%s=%s", poolIDLabelprefix, poolID)
}
func (r *basePoolManager) controllerLabel() string {
- return fmt.Sprintf("%s%s", controllerLabelPrefix, r.controllerID)
+ return fmt.Sprintf("%s=%s", controllerLabelPrefix, r.controllerInfo.ControllerID.String())
}
-func (r *basePoolManager) updateArgsFromProviderInstance(providerInstance params.Instance) params.UpdateInstanceParams {
+func (r *basePoolManager) updateArgsFromProviderInstance(providerInstance commonParams.ProviderInstance) params.UpdateInstanceParams {
return params.UpdateInstanceParams{
ProviderID: providerInstance.ProviderID,
OSName: providerInstance.OSName,
OSVersion: providerInstance.OSVersion,
Addresses: providerInstance.Addresses,
Status: providerInstance.Status,
- RunnerStatus: providerInstance.RunnerStatus,
ProviderFault: providerInstance.ProviderFault,
}
}
-func (r *basePoolManager) scaleDownOnePool(pool params.Pool) {
+
+func (r *basePoolManager) scaleDownOnePool(ctx context.Context, pool params.Pool) error {
+ slog.DebugContext(
+ ctx, "scaling down pool",
+ "pool_id", pool.ID)
if !pool.Enabled {
- return
+ slog.DebugContext(
+ ctx, "pool is disabled, skipping scale down",
+ "pool_id", pool.ID)
+ return nil
}
existingInstances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
if err != nil {
- log.Printf("failed to ensure minimum idle workers for pool %s: %s", pool.ID, err)
- return
+ return fmt.Errorf("failed to ensure minimum idle workers for pool %s: %w", pool.ID, err)
}
idleWorkers := []params.Instance{}
@@ -836,108 +1050,207 @@ func (r *basePoolManager) scaleDownOnePool(pool params.Pool) {
// consideration for scale-down. The 5 minute grace period prevents a situation where a
// "queued" workflow triggers the creation of a new idle runner, and this routine reaps
// an idle runner before they have a chance to pick up a job.
- if providerCommon.RunnerStatus(inst.RunnerStatus) == providerCommon.RunnerIdle &&
- providerCommon.InstanceStatus(inst.Status) == providerCommon.InstanceRunning &&
- time.Since(inst.UpdatedAt).Minutes() > 5 {
+ if inst.RunnerStatus == params.RunnerIdle && inst.Status == commonParams.InstanceRunning && time.Since(inst.UpdatedAt).Minutes() > 2 {
idleWorkers = append(idleWorkers, inst)
}
}
if len(idleWorkers) == 0 {
- return
+ return nil
}
- surplus := float64(len(idleWorkers) - int(pool.MinIdleRunners))
+ surplus := float64(len(idleWorkers) - pool.MinIdleRunnersAsInt())
if surplus <= 0 {
- return
+ return nil
}
scaleDownFactor := 0.5 // could be configurable
numScaleDown := int(math.Ceil(surplus * scaleDownFactor))
if numScaleDown <= 0 || numScaleDown > len(idleWorkers) {
- log.Printf("invalid number of instances to scale down: %v, check your scaleDownFactor: %v\n", numScaleDown, scaleDownFactor)
- return
+ return fmt.Errorf("invalid number of instances to scale down: %v, check your scaleDownFactor: %v", numScaleDown, scaleDownFactor)
}
+ g, _ := errgroup.WithContext(ctx)
+
for _, instanceToDelete := range idleWorkers[:numScaleDown] {
- log.Printf("scaling down idle worker %s from pool %s\n", instanceToDelete.Name, pool.ID)
- if err := r.ForceDeleteRunner(instanceToDelete); err != nil {
- log.Printf("failed to delete instance %s: %s", instanceToDelete.ID, err)
+ instanceToDelete := instanceToDelete
+
+ lockAcquired := locking.TryLock(instanceToDelete.Name, r.consumerID)
+ if !lockAcquired {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to acquire lock for instance",
+ "provider_id", instanceToDelete.Name)
+ continue
+ }
+ defer locking.Unlock(instanceToDelete.Name, false)
+
+ g.Go(func() error {
+ slog.InfoContext(
+ ctx, "scaling down idle worker from pool",
+ "runner_name", instanceToDelete.Name,
+ "pool_id", pool.ID)
+ if err := r.DeleteRunner(instanceToDelete, false, false); err != nil {
+ return fmt.Errorf("failed to delete instance %s: %w", instanceToDelete.ID, err)
+ }
+ return nil
+ })
+ }
+
+ if numScaleDown > 0 {
+ // We just scaled down a runner for this pool. That means that if we have jobs that are
+ // still queued in our DB, and those jobs should match this pool but have not been picked
+ // up by a runner, they are most likely stale and can be removed. For now, we can simply
+ // remove jobs older than 10 minutes.
+ //
+ // nolint:golangci-lint,godox
+ // TODO: should probably allow aditional filters to list functions. Would help to filter by date
+ // instead of returning a bunch of results and filtering manually.
+ queued, err := r.store.ListEntityJobsByStatus(r.ctx, r.entity.EntityType, r.entity.ID, params.JobStatusQueued)
+ if err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error listing queued jobs: %w", err)
+ }
+
+ for _, job := range queued {
+ if time.Since(job.CreatedAt).Minutes() > 10 && pool.HasRequiredLabels(job.Labels) {
+ if err := r.store.DeleteJob(ctx, job.WorkflowJobID); err != nil && !errors.Is(err, runnerErrors.ErrNotFound) {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to delete job",
+ "job_id", job.WorkflowJobID)
+ }
+ }
}
}
+
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("failed to scale down pool %s: %w", pool.ID, err)
+ }
+ return nil
}
-func (r *basePoolManager) ensureIdleRunnersForOnePool(pool params.Pool) {
+func (r *basePoolManager) addRunnerToPool(pool params.Pool, aditionalLabels []string) error {
if !pool.Enabled {
- return
+ return fmt.Errorf("pool %s is disabled", pool.ID)
}
+
+ poolInstanceCount, err := r.store.PoolInstanceCount(r.ctx, pool.ID)
+ if err != nil {
+ return fmt.Errorf("failed to list pool instances: %w", err)
+ }
+
+ if poolInstanceCount >= int64(pool.MaxRunnersAsInt()) {
+ return fmt.Errorf("max workers (%d) reached for pool %s", pool.MaxRunners, pool.ID)
+ }
+
+ if err := r.AddRunner(r.ctx, pool.ID, aditionalLabels); err != nil {
+ return fmt.Errorf("failed to add new instance for pool %s: %s", pool.ID, err)
+ }
+ return nil
+}
+
+func (r *basePoolManager) ensureIdleRunnersForOnePool(pool params.Pool) error {
+ if !pool.Enabled || pool.MinIdleRunners == 0 {
+ return nil
+ }
+
existingInstances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
if err != nil {
- log.Printf("failed to ensure minimum idle workers for pool %s: %s", pool.ID, err)
- return
+ return fmt.Errorf("failed to ensure minimum idle workers for pool %s: %w", pool.ID, err)
}
if uint(len(existingInstances)) >= pool.MaxRunners {
- log.Printf("max workers (%d) reached for pool %s, skipping idle worker creation", pool.MaxRunners, pool.ID)
- return
+ slog.DebugContext(
+ r.ctx, "max workers reached for pool, skipping idle worker creation",
+ "max_runners", pool.MaxRunners,
+ "pool_id", pool.ID)
+ return nil
}
idleOrPendingWorkers := []params.Instance{}
for _, inst := range existingInstances {
- if providerCommon.RunnerStatus(inst.RunnerStatus) != providerCommon.RunnerActive {
+ if inst.RunnerStatus != params.RunnerActive && inst.RunnerStatus != params.RunnerTerminated {
idleOrPendingWorkers = append(idleOrPendingWorkers, inst)
}
}
var required int
- if len(idleOrPendingWorkers) < int(pool.MinIdleRunners) {
+ if len(idleOrPendingWorkers) < pool.MinIdleRunnersAsInt() {
// get the needed delta.
- required = int(pool.MinIdleRunners) - len(idleOrPendingWorkers)
+ required = pool.MinIdleRunnersAsInt() - len(idleOrPendingWorkers)
projectedInstanceCount := len(existingInstances) + required
- if uint(projectedInstanceCount) > pool.MaxRunners {
+
+ var projected uint
+ if projectedInstanceCount > 0 {
+ projected = uint(projectedInstanceCount)
+ }
+ if projected > pool.MaxRunners {
// ensure we don't go above max workers
- delta := projectedInstanceCount - int(pool.MaxRunners)
- required = required - delta
+ delta := projectedInstanceCount - pool.MaxRunnersAsInt()
+ required -= delta
}
}
for i := 0; i < required; i++ {
- log.Printf("adding new idle worker to pool %s", pool.ID)
- if err := r.AddRunner(r.ctx, pool.ID); err != nil {
- log.Printf("failed to add new instance for pool %s: %s", pool.ID, err)
+ slog.InfoContext(
+ r.ctx, "adding new idle worker to pool",
+ "pool_id", pool.ID)
+ if err := r.AddRunner(r.ctx, pool.ID, nil); err != nil {
+ return fmt.Errorf("failed to add new instance for pool %s: %w", pool.ID, err)
}
}
+ return nil
}
-func (r *basePoolManager) retryFailedInstancesForOnePool(pool params.Pool) {
+func (r *basePoolManager) retryFailedInstancesForOnePool(ctx context.Context, pool params.Pool) error {
if !pool.Enabled {
- return
+ return nil
}
+ slog.DebugContext(
+ ctx, "running retry failed instances for pool",
+ "pool_id", pool.ID)
existingInstances, err := r.store.ListPoolInstances(r.ctx, pool.ID)
if err != nil {
- log.Printf("retrying failed instances: failed to list instances for pool %s: %s", pool.ID, err)
- return
+ return fmt.Errorf("failed to list instances for pool %s: %w", pool.ID, err)
}
- g, _ := errgroup.WithContext(r.ctx)
+ g, errCtx := errgroup.WithContext(ctx)
for _, instance := range existingInstances {
- if instance.Status != providerCommon.InstanceError {
+ instance := instance
+
+ if instance.Status != commonParams.InstanceError {
continue
}
if instance.CreateAttempt >= maxCreateAttempts {
continue
}
- instance := instance
+
+ slog.DebugContext(
+ ctx, "attempting to retry failed instance",
+ "runner_name", instance.Name)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+
g.Go(func() error {
+ defer locking.Unlock(instance.Name, false)
+ slog.DebugContext(
+ ctx, "attempting to clean up any previous instance",
+ "runner_name", instance.Name)
+ // nolint:golangci-lint,godox
// NOTE(gabriel-samfira): this is done in parallel. If there are many failed instances
// this has the potential to create many API requests to the target provider.
// TODO(gabriel-samfira): implement request throttling.
- if err := r.deleteInstanceFromProvider(instance); err != nil {
- log.Printf("failed to delete instance %s from provider: %s", instance.Name, err)
+ if err := r.deleteInstanceFromProvider(errCtx, instance); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to delete instance from provider",
+ "runner_name", instance.Name)
// Bail here, otherwise we risk creating multiple failing instances, and losing track
// of them. If Create instance failed to return a proper provider ID, we rely on the
// name to delete the instance. If we don't bail here, and end up with multiple
@@ -947,89 +1260,101 @@ func (r *basePoolManager) retryFailedInstancesForOnePool(pool params.Pool) {
// which we would rather avoid.
return err
}
-
+ slog.DebugContext(
+ ctx, "cleanup of previously failed instance complete",
+ "runner_name", instance.Name)
+ // nolint:golangci-lint,godox
// TODO(gabriel-samfira): Incrementing CreateAttempt should be done within a transaction.
// It's fairly safe to do here (for now), as there should be no other code path that updates
// an instance in this state.
- var tokenFetched bool = false
+ var tokenFetched bool = len(instance.JitConfiguration) > 0
updateParams := params.UpdateInstanceParams{
CreateAttempt: instance.CreateAttempt + 1,
TokenFetched: &tokenFetched,
- Status: providerCommon.InstancePendingCreate,
+ Status: commonParams.InstancePendingCreate,
+ RunnerStatus: params.RunnerPending,
}
- log.Printf("queueing previously failed instance %s for retry", instance.Name)
+ slog.DebugContext(
+ ctx, "queueing previously failed instance for retry",
+ "runner_name", instance.Name)
// Set instance to pending create and wait for retry.
- if err := r.updateInstance(instance.Name, updateParams); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
+ if _, err := r.store.UpdateInstance(r.ctx, instance.Name, updateParams); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
return nil
})
}
- if err := g.Wait(); err != nil {
- log.Printf("failed to retry failed instances for pool %s: %s", pool.ID, err)
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("failed to retry failed instances for pool %s: %w", pool.ID, err)
}
+ return nil
}
-func (r *basePoolManager) retryFailedInstances() {
- pools, err := r.helper.ListPools()
- if err != nil {
- log.Printf("error listing pools: %s", err)
- return
- }
- wg := sync.WaitGroup{}
- wg.Add(len(pools))
+func (r *basePoolManager) retryFailedInstances() error {
+ pools := cache.GetEntityPools(r.entity.ID)
+ g, ctx := errgroup.WithContext(r.ctx)
for _, pool := range pools {
- go func(pool params.Pool) {
- defer wg.Done()
- r.retryFailedInstancesForOnePool(pool)
- }(pool)
+ pool := pool
+ g.Go(func() error {
+ if err := r.retryFailedInstancesForOnePool(ctx, pool); err != nil {
+ return fmt.Errorf("retrying failed instances for pool %s: %w", pool.ID, err)
+ }
+ return nil
+ })
}
- wg.Wait()
+
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("retrying failed instances: %w", err)
+ }
+
+ return nil
}
-func (r *basePoolManager) scaleDown() {
- pools, err := r.helper.ListPools()
- if err != nil {
- log.Printf("error listing pools: %s", err)
- return
- }
- wg := sync.WaitGroup{}
- wg.Add(len(pools))
+func (r *basePoolManager) scaleDown() error {
+ pools := cache.GetEntityPools(r.entity.ID)
+ g, ctx := errgroup.WithContext(r.ctx)
for _, pool := range pools {
- go func(pool params.Pool) {
- defer wg.Done()
- r.scaleDownOnePool(pool)
- }(pool)
+ pool := pool
+ g.Go(func() error {
+ slog.DebugContext(
+ ctx, "running scale down for pool",
+ "pool_id", pool.ID)
+ return r.scaleDownOnePool(ctx, pool)
+ })
}
- wg.Wait()
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("failed to scale down: %w", err)
+ }
+ return nil
}
-func (r *basePoolManager) ensureMinIdleRunners() {
- pools, err := r.helper.ListPools()
- if err != nil {
- log.Printf("error listing pools: %s", err)
- return
- }
- wg := sync.WaitGroup{}
- wg.Add(len(pools))
+func (r *basePoolManager) ensureMinIdleRunners() error {
+ pools := cache.GetEntityPools(r.entity.ID)
+ g, _ := errgroup.WithContext(r.ctx)
for _, pool := range pools {
- go func(pool params.Pool) {
- defer wg.Done()
- r.ensureIdleRunnersForOnePool(pool)
- }(pool)
+ pool := pool
+ g.Go(func() error {
+ return r.ensureIdleRunnersForOnePool(pool)
+ })
}
- wg.Wait()
+
+ if err := r.waitForErrorGroupOrContextCancelled(g); err != nil {
+ return fmt.Errorf("failed to ensure minimum idle workers: %w", err)
+ }
+ return nil
}
-func (r *basePoolManager) deleteInstanceFromProvider(instance params.Instance) error {
- pool, err := r.helper.GetPoolByID(instance.PoolID)
+func (r *basePoolManager) deleteInstanceFromProvider(ctx context.Context, instance params.Instance) error {
+ pool, err := r.store.GetEntityPool(r.ctx, r.entity, instance.PoolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
- provider, ok := r.providers[pool.ProviderName]
+ provider, ok := r.providers[instance.ProviderName]
if !ok {
- return fmt.Errorf("unknown provider %s for pool %s", pool.ProviderName, pool.ID)
+ return fmt.Errorf("unknown provider %s for pool %s", instance.ProviderName, instance.PoolID)
}
identifier := instance.ProviderID
@@ -1039,155 +1364,309 @@ func (r *basePoolManager) deleteInstanceFromProvider(instance params.Instance) e
identifier = instance.Name
}
- if err := provider.DeleteInstance(r.ctx, identifier); err != nil {
- return errors.Wrap(err, "removing instance")
+ slog.DebugContext(
+ ctx, "calling delete instance on provider",
+ "runner_name", instance.Name,
+ "provider_id", identifier)
+
+ deleteInstanceParams := common.DeleteInstanceParams{
+ DeleteInstanceV011: common.DeleteInstanceV011Params{
+ ProviderBaseParams: r.getProviderBaseParams(pool),
+ },
+ }
+ if err := provider.DeleteInstance(ctx, identifier, deleteInstanceParams); err != nil {
+ return fmt.Errorf("error removing instance: %w", err)
}
return nil
}
-func (r *basePoolManager) deletePendingInstances() {
- instances, err := r.helper.FetchDbInstances()
- if err != nil {
- log.Printf("failed to fetch instances from store: %s", err)
- return
+func (r *basePoolManager) sleepWithCancel(sleepTime time.Duration) (canceled bool) {
+ if sleepTime == 0 {
+ return false
}
- g, ctx := errgroup.WithContext(r.ctx)
+ ticker := time.NewTicker(sleepTime)
+ defer ticker.Stop()
+
+ select {
+ case <-ticker.C:
+ return false
+ case <-r.quit:
+ case <-r.ctx.Done():
+ }
+ return true
+}
+
+func (r *basePoolManager) deletePendingInstances() error {
+ instances, err := r.store.ListEntityInstances(r.ctx, r.entity)
+ if err != nil {
+ return fmt.Errorf("failed to fetch instances from store: %w", err)
+ }
+
+ slog.DebugContext(
+ r.ctx, "removing instances in pending_delete")
for _, instance := range instances {
- if instance.Status != providerCommon.InstancePendingDelete {
+ if instance.ScaleSetID != 0 {
+ // instance is part of a scale set. Skip.
+ continue
+ }
+
+ if instance.Status != commonParams.InstancePendingDelete && instance.Status != commonParams.InstancePendingForceDelete {
// not in pending_delete status. Skip.
continue
}
- // Set the status to deleting before launching the goroutine that removes
- // the runner from the provider (which can take a long time).
- if err := r.setInstanceStatus(instance.Name, providerCommon.InstanceDeleting, nil); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
+ slog.InfoContext(
+ r.ctx, "removing instance from pool",
+ "runner_name", instance.Name,
+ "pool_id", instance.PoolID)
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.InfoContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
}
- instance := instance
- g.Go(func() (err error) {
+
+ shouldProcess, deadline := r.backoff.ShouldProcess(instance.Name)
+ if !shouldProcess {
+ slog.DebugContext(
+ r.ctx, "backoff in effect for instance",
+ "runner_name", instance.Name, "deadline", deadline)
+ locking.Unlock(instance.Name, false)
+ continue
+ }
+
+ go func(instance params.Instance) (err error) {
+ // Prevent Thundering Herd. Should alleviate some of the database
+ // is locked errors in sqlite3.
+ num, err := rand.Int(rand.Reader, big.NewInt(2000))
+ if err != nil {
+ return fmt.Errorf("failed to generate random number: %w", err)
+ }
+ jitter := time.Duration(num.Int64()) * time.Millisecond
+ if canceled := r.sleepWithCancel(jitter); canceled {
+ return nil
+ }
+
+ currentStatus := instance.Status
+ deleteMux := false
+ defer func() {
+ locking.Unlock(instance.Name, deleteMux)
+ if deleteMux {
+ // deleteMux is set only when the instance was successfully removed.
+ // We can use it as a marker to signal that the backoff is no longer
+ // needed.
+ r.backoff.Delete(instance.Name)
+ }
+ }()
defer func(instance params.Instance) {
if err != nil {
- // failed to remove from provider. Set the status back to pending_delete, which
- // will retry the operation.
- if err := r.setInstanceStatus(instance.Name, providerCommon.InstancePendingDelete, nil); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove instance",
+ "runner_name", instance.Name)
+ // failed to remove from provider. Set status to previous value, which will retry
+ // the operation.
+ if _, err := r.setInstanceStatus(instance.Name, currentStatus, []byte(err.Error())); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
+ r.backoff.RecordFailure(instance.Name)
}
}(instance)
- err = r.deleteInstanceFromProvider(instance)
- if err != nil {
- return errors.Wrap(err, "removing instance from provider")
+ if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceDeleting, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ return err
}
- if deleteErr := r.store.DeleteInstance(ctx, instance.PoolID, instance.Name); deleteErr != nil {
- return errors.Wrap(deleteErr, "deleting instance from database")
+ slog.DebugContext(
+ r.ctx, "removing instance from provider",
+ "runner_name", instance.Name)
+ err = r.deleteInstanceFromProvider(r.ctx, instance)
+ if err != nil {
+ if currentStatus != commonParams.InstancePendingForceDelete {
+ return fmt.Errorf("failed to remove instance from provider: %w", err)
+ }
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove instance from provider (continuing anyway)",
+ "instance", instance.Name)
}
- return
- })
- }
- if err := g.Wait(); err != nil {
- log.Printf("failed to delete pending instances: %s", err)
+ slog.InfoContext(
+ r.ctx, "removing instance from database",
+ "runner_name", instance.Name)
+ if deleteErr := r.store.DeleteInstance(r.ctx, instance.PoolID, instance.Name); deleteErr != nil {
+ return fmt.Errorf("failed to delete instance from database: %w", deleteErr)
+ }
+ deleteMux = true
+ slog.InfoContext(
+ r.ctx, "instance was successfully removed",
+ "runner_name", instance.Name)
+ return nil
+ }(instance) //nolint
}
+
+ return nil
}
-func (r *basePoolManager) addPendingInstances() {
+func (r *basePoolManager) addPendingInstances() error {
+ // nolint:golangci-lint,godox
// TODO: filter instances by status.
- instances, err := r.helper.FetchDbInstances()
+ instances, err := r.store.ListEntityInstances(r.ctx, r.entity)
if err != nil {
- log.Printf("failed to fetch instances from store: %s", err)
- return
+ return fmt.Errorf("failed to fetch instances from store: %w", err)
}
- g, _ := errgroup.WithContext(r.ctx)
for _, instance := range instances {
- if instance.Status != providerCommon.InstancePendingCreate {
+ if instance.ScaleSetID != 0 {
+ // instance is part of a scale set. Skip.
+ continue
+ }
+
+ if instance.Status != commonParams.InstancePendingCreate {
// not in pending_create status. Skip.
continue
}
+
+ slog.DebugContext(
+ r.ctx, "attempting to acquire lock for instance",
+ "runner_name", instance.Name,
+ "action", "create_pending")
+ lockAcquired := locking.TryLock(instance.Name, r.consumerID)
+ if !lockAcquired {
+ slog.DebugContext(
+ r.ctx, "failed to acquire lock for instance",
+ "runner_name", instance.Name)
+ continue
+ }
+
// Set the instance to "creating" before launching the goroutine. This will ensure that addPendingInstances()
// won't attempt to create the runner a second time.
- if err := r.setInstanceStatus(instance.Name, providerCommon.InstanceCreating, nil); err != nil {
- log.Printf("failed to update runner %s status: %s", instance.Name, err)
+ if _, err := r.setInstanceStatus(instance.Name, commonParams.InstanceCreating, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
+ locking.Unlock(instance.Name, false)
// We failed to transition the instance to Creating. This means that garm will retry to create this instance
// when the loop runs again and we end up with multiple instances.
continue
}
- instance := instance
- g.Go(func() error {
- log.Printf("creating instance %s in pool %s", instance.Name, instance.PoolID)
+
+ go func(instance params.Instance) {
+ defer locking.Unlock(instance.Name, false)
+ slog.InfoContext(
+ r.ctx, "creating instance in pool",
+ "runner_name", instance.Name,
+ "pool_id", instance.PoolID)
if err := r.addInstanceToProvider(instance); err != nil {
- log.Printf("failed to add instance to provider: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to add instance to provider",
+ "runner_name", instance.Name)
errAsBytes := []byte(err.Error())
- if err := r.setInstanceStatus(instance.Name, providerCommon.InstanceError, errAsBytes); err != nil {
- log.Printf("failed to update runner %s status", instance.Name)
+ if _, statusErr := r.setInstanceStatus(instance.Name, commonParams.InstanceError, errAsBytes); statusErr != nil {
+ slog.With(slog.Any("error", statusErr)).ErrorContext(
+ r.ctx, "failed to update runner status",
+ "runner_name", instance.Name)
}
- log.Printf("failed to create instance in provider: %s", err)
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to create instance in provider",
+ "runner_name", instance.Name)
}
- return nil
- })
+ }(instance)
}
- if err := g.Wait(); err != nil {
- log.Printf("failed to add pending instances: %s", err)
- }
-}
-func (r *basePoolManager) consolidate() {
- // TODO(gabriel-samfira): replace this with something more efficient.
- r.mux.Lock()
- defer r.mux.Unlock()
-
- wg := sync.WaitGroup{}
- wg.Add(2)
- go func() {
- defer wg.Done()
- r.deletePendingInstances()
- }()
- go func() {
- defer wg.Done()
- r.addPendingInstances()
- }()
- wg.Wait()
-
- wg.Add(2)
- go func() {
- defer wg.Done()
- r.ensureMinIdleRunners()
- }()
-
- go func() {
- defer wg.Done()
- r.retryFailedInstances()
- }()
- wg.Wait()
+ return nil
}
func (r *basePoolManager) Wait() error {
+ done := make(chan struct{})
+ timer := time.NewTimer(60 * time.Second)
+ go func() {
+ r.wg.Wait()
+ timer.Stop()
+ close(done)
+ }()
select {
- case <-r.done:
- case <-time.After(60 * time.Second):
- return errors.Wrap(runnerErrors.ErrTimeout, "waiting for pool to stop")
+ case <-done:
+ case <-timer.C:
+ return runnerErrors.NewTimeoutError("waiting for pool to stop")
}
return nil
}
-func (r *basePoolManager) runnerCleanup() error {
- runners, err := r.helper.GetGithubRunners()
+func (r *basePoolManager) runnerCleanup() (err error) {
+ slog.DebugContext(
+ r.ctx, "running runner cleanup")
+ runners, err := r.GetGithubRunners()
if err != nil {
- return errors.Wrap(err, "fetching github runners")
+ return fmt.Errorf("failed to fetch github runners: %w", err)
}
+
+ if err := r.reapTimedOutRunners(runners); err != nil {
+ return fmt.Errorf("failed to reap timed out runners: %w", err)
+ }
+
+ if err := r.cleanupOrphanedRunners(runners); err != nil {
+ return fmt.Errorf("failed to cleanup orphaned runners: %w", err)
+ }
+
+ return nil
+}
+
+func (r *basePoolManager) cleanupOrphanedRunners(runners []forgeRunner) error {
if err := r.cleanupOrphanedProviderRunners(runners); err != nil {
- return errors.Wrap(err, "cleaning orphaned instances")
+ return fmt.Errorf("error cleaning orphaned instances: %w", err)
}
if err := r.cleanupOrphanedGithubRunners(runners); err != nil {
- return errors.Wrap(err, "cleaning orphaned github runners")
+ return fmt.Errorf("error cleaning orphaned github runners: %w", err)
}
+
return nil
}
func (r *basePoolManager) Start() error {
- go r.loop()
+ initialToolUpdate := make(chan struct{}, 1)
+ go func() {
+ slog.Info("running initial tool update")
+ for {
+ slog.DebugContext(r.ctx, "waiting for tools to be available")
+ hasTools, stopped := r.waitForToolsOrCancel()
+ if stopped {
+ return
+ }
+ if hasTools {
+ break
+ }
+ }
+ if err := r.updateTools(); err != nil {
+ slog.With(slog.Any("error", err)).Error("failed to update tools")
+ }
+ initialToolUpdate <- struct{}{}
+ }()
+
+ go r.runWatcher()
+ go func() {
+ select {
+ case <-r.quit:
+ return
+ case <-r.ctx.Done():
+ return
+ case <-initialToolUpdate:
+ }
+ defer close(initialToolUpdate)
+ go r.startLoopForFunction(r.runnerCleanup, common.PoolReapTimeoutInterval, "timeout_reaper", false)
+ go r.startLoopForFunction(r.scaleDown, common.PoolScaleDownInterval, "scale_down", false)
+ // always run the delete pending instances routine. This way we can still remove existing runners, even if the pool is not running.
+ go r.startLoopForFunction(r.deletePendingInstances, common.PoolConsilitationInterval, "consolidate[delete_pending]", true)
+ go r.startLoopForFunction(r.addPendingInstances, common.PoolConsilitationInterval, "consolidate[add_pending]", false)
+ go r.startLoopForFunction(r.ensureMinIdleRunners, common.PoolConsilitationInterval, "consolidate[ensure_min_idle]", false)
+ go r.startLoopForFunction(r.retryFailedInstances, common.PoolConsilitationInterval, "consolidate[retry_failed]", false)
+ go r.startLoopForFunction(r.updateTools, common.PoolToolUpdateInterval, "update_tools", true)
+ go r.startLoopForFunction(r.consumeQueuedJobs, common.PoolConsilitationInterval, "job_queue_consumer", false)
+ }()
return nil
}
@@ -1196,57 +1675,390 @@ func (r *basePoolManager) Stop() error {
return nil
}
-func (r *basePoolManager) RefreshState(param params.UpdatePoolStateParams) error {
- return r.helper.UpdateState(param)
-}
-
func (r *basePoolManager) WebhookSecret() string {
- return r.helper.WebhookSecret()
-}
-
-func (r *basePoolManager) GithubRunnerRegistrationToken() (string, error) {
- return r.helper.GetGithubRegistrationToken()
+ return r.entity.WebhookSecret
}
func (r *basePoolManager) ID() string {
- return r.helper.ID()
+ return r.entity.ID
}
-func (r *basePoolManager) ForceDeleteRunner(runner params.Instance) error {
- if !r.managerIsRunning {
- return runnerErrors.NewConflictError("pool manager is not running for %s", r.helper.String())
+// Delete runner will delete a runner from a pool. If forceRemove is set to true, any error received from
+// the IaaS provider will be ignored and deletion will continue.
+func (r *basePoolManager) DeleteRunner(runner params.Instance, forceRemove, bypassGHUnauthorizedError bool) error {
+ if !r.managerIsRunning && !bypassGHUnauthorizedError {
+ return runnerErrors.NewConflictError("pool manager is not running for %s", r.entity.String())
}
+
if runner.AgentID != 0 {
- resp, err := r.helper.RemoveGithubRunner(runner.AgentID)
- if err != nil {
- if resp != nil {
- switch resp.StatusCode {
- case http.StatusUnprocessableEntity:
- return errors.Wrapf(runnerErrors.ErrBadRequest, "removing runner: %q", err)
- case http.StatusNotFound:
- // Runner may have been deleted by a finished job, or manually by the user.
- log.Printf("runner with agent id %d was not found in github", runner.AgentID)
- case http.StatusUnauthorized:
- // Mark the pool as offline from this point forward
- failureReason := fmt.Sprintf("failed to remove runner: %q", err)
- r.setPoolRunningState(false, failureReason)
- log.Print(failureReason)
- // evaluate the next switch case.
- fallthrough
- default:
- return errors.Wrap(err, "removing runner")
+ if err := r.ghcli.RemoveEntityRunner(r.ctx, runner.AgentID); err != nil {
+ if errors.Is(err, runnerErrors.ErrUnauthorized) {
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "failed to remove runner from github")
+ // Mark the pool as offline from this point forward
+ r.SetPoolRunningState(false, fmt.Sprintf("failed to remove runner: %q", err))
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to remove runner")
+ if bypassGHUnauthorizedError {
+ slog.Info("bypass github unauthorized error is set, marking runner for deletion")
+ } else {
+ return fmt.Errorf("error removing runner: %w", err)
}
} else {
- // We got a nil response. Assume we are in error.
- return errors.Wrap(err, "removing runner")
+ return fmt.Errorf("error removing runner: %w", err)
}
}
}
- log.Printf("setting instance status for: %v", runner.Name)
- if err := r.setInstanceStatus(runner.Name, providerCommon.InstancePendingDelete, nil); err != nil {
- log.Printf("failed to update runner %s status", runner.Name)
- return errors.Wrap(err, "updating runner")
+ instanceStatus := commonParams.InstancePendingDelete
+ if forceRemove {
+ instanceStatus = commonParams.InstancePendingForceDelete
+ }
+
+ slog.InfoContext(
+ r.ctx, "setting instance status",
+ "runner_name", runner.Name,
+ "status", instanceStatus)
+ if _, err := r.setInstanceStatus(runner.Name, instanceStatus, nil); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to update runner",
+ "runner_name", runner.Name)
+ return fmt.Errorf("error updating runner: %w", err)
}
return nil
}
+
+// consumeQueuedJobs will pull all the known jobs from the database and attempt to create a new
+// runner in one of the pools it manages, if it matches the requested labels.
+// This is a best effort attempt to consume queued jobs. We do not have any real way to know which
+// runner from which pool will pick up a job we react to here. For example, the same job may be received
+// by an enterprise manager, an org manager AND a repo manager. If an idle runner from another pool
+// picks up the job after we created a runner in this pool, we will have an extra runner that may or may not
+// have a job waiting for it.
+// This is not a huge problem, as we have scale down logic which should remove any idle runners that have not
+// picked up a job within a certain time frame. Also, the logic here should ensure that eventually, all known
+// queued jobs will be consumed sooner or later.
+//
+// NOTE: jobs that were created while the garm instance was down, will be unknown to garm itself and will linger
+// in queued state if the pools defined in garm have a minimum idle runner value set to 0. Simply put, garm won't
+// know about the queued jobs that we didn't get a webhook for. Listing all jobs on startup is not feasible, as
+// an enterprise may have thousands of repos and thousands of jobs in queued state. To fetch all jobs for an
+// enterprise, we'd have to list all repos, and for each repo list all jobs currently in queued state. This is
+// not desirable by any measure.
+//
+// One way to handle situations where garm comes up after a longer period of time, is to temporarily max out the
+// min-idle-runner setting on pools, or at least raise it above 0. The idle runners will start to consume jobs, and
+// as they do so, new idle runners will be spun up in their stead. New jobs will record in the DB as they come in,
+// so those will trigger the creation of a runner. The jobs we don't know about will be dealt with by the idle runners.
+// Once jobs are consumed, you can set min-idle-runners to 0 again.
+func (r *basePoolManager) consumeQueuedJobs() error {
+ queued, err := r.store.ListEntityJobsByStatus(r.ctx, r.entity.EntityType, r.entity.ID, params.JobStatusQueued)
+ if err != nil {
+ return fmt.Errorf("error listing queued jobs: %w", err)
+ }
+
+ poolsCache := poolsForTags{
+ poolCacheType: r.entity.GetPoolBalancerType(),
+ }
+
+ slog.DebugContext(
+ r.ctx, "found queued jobs",
+ "job_count", len(queued))
+ for _, job := range queued {
+ if job.LockedBy != uuid.Nil && job.LockedBy.String() != r.ID() {
+ // Job was handled by us or another entity.
+ slog.DebugContext(
+ r.ctx, "job is locked",
+ "job_id", job.WorkflowJobID,
+ "locking_entity", job.LockedBy.String())
+ continue
+ }
+
+ if time.Since(job.UpdatedAt) < time.Second*r.controllerInfo.JobBackoff() {
+ // give the idle runners a chance to pick up the job.
+ slog.DebugContext(
+ r.ctx, "job backoff not reached", "backoff_interval", r.controllerInfo.MinimumJobAgeBackoff,
+ "job_id", job.WorkflowJobID)
+ continue
+ }
+
+ if time.Since(job.UpdatedAt) >= time.Minute*10 {
+ // Job is still queued in our db, 10 minutes after a matching runner
+ // was spawned. Unlock it and try again. A different job may have picked up
+ // the runner.
+ if err := r.store.UnlockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ // nolint:golangci-lint,godox
+ // TODO: Implement a cache? Should we return here?
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to unlock job",
+ "job_id", job.WorkflowJobID)
+ continue
+ }
+ }
+
+ if job.LockedBy.String() == r.ID() {
+ // nolint:golangci-lint,godox
+ // Job is locked by us. We must have already attempted to create a runner for it. Skip.
+ // TODO(gabriel-samfira): create an in-memory state of existing runners that we can easily
+ // check for existing pending or idle runners. If we can't find any, attempt to allocate another
+ // runner.
+ slog.DebugContext(
+ r.ctx, "job is locked by us",
+ "job_id", job.WorkflowJobID)
+ continue
+ }
+
+ poolRR, ok := poolsCache.Get(job.Labels)
+ if !ok {
+ potentialPools, err := r.store.FindPoolsMatchingAllTags(r.ctx, r.entity.EntityType, r.entity.ID, job.Labels)
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "error finding pools matching labels")
+ continue
+ }
+ poolRR = poolsCache.Add(job.Labels, potentialPools)
+ }
+
+ if poolRR.Len() == 0 {
+ slog.DebugContext(r.ctx, "could not find pools with labels", "requested_labels", strings.Join(job.Labels, ","))
+ continue
+ }
+
+ runnerCreated := false
+ if err := r.store.LockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not lock job",
+ "job_id", job.WorkflowJobID)
+ continue
+ }
+
+ jobLabels := []string{
+ fmt.Sprintf("%s=%d", jobLabelPrefix, job.WorkflowJobID),
+ }
+ for i := 0; i < poolRR.Len(); i++ {
+ pool, err := poolRR.Next()
+ if err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not find a pool to create a runner for job",
+ "job_id", job.WorkflowJobID)
+ break
+ }
+
+ slog.InfoContext(
+ r.ctx, "attempting to create a runner in pool",
+ "pool_id", pool.ID,
+ "job_id", job.WorkflowJobID)
+ if err := r.addRunnerToPool(pool, jobLabels); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "could not add runner to pool",
+ "pool_id", pool.ID)
+ continue
+ }
+ slog.DebugContext(r.ctx, "a new runner was added as a response to queued job",
+ "pool_id", pool.ID,
+ "job_id", job.WorkflowJobID)
+ runnerCreated = true
+ break
+ }
+
+ if !runnerCreated {
+ slog.WarnContext(
+ r.ctx, "could not create a runner for job; unlocking",
+ "job_id", job.WorkflowJobID)
+ if err := r.store.UnlockJob(r.ctx, job.WorkflowJobID, r.ID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to unlock job",
+ "job_id", job.WorkflowJobID)
+ return fmt.Errorf("error unlocking job: %w", err)
+ }
+ }
+ }
+
+ if err := r.store.DeleteCompletedJobs(r.ctx); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ r.ctx, "failed to delete completed jobs")
+ }
+ return nil
+}
+
+func (r *basePoolManager) UninstallWebhook(ctx context.Context) error {
+ if r.controllerInfo.ControllerWebhookURL == "" {
+ return runnerErrors.NewBadRequestError("controller webhook url is empty")
+ }
+
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return fmt.Errorf("error listing hooks: %w", err)
+ }
+
+ var controllerHookID int64
+ var baseHook string
+ trimmedBase := strings.TrimRight(r.controllerInfo.WebhookURL, "/")
+ trimmedController := strings.TrimRight(r.controllerInfo.ControllerWebhookURL, "/")
+
+ for _, hook := range allHooks {
+ hookInfo := hookToParamsHookInfo(hook)
+ info := strings.TrimRight(hookInfo.URL, "/")
+ if strings.EqualFold(info, trimmedController) {
+ controllerHookID = hook.GetID()
+ }
+
+ if strings.EqualFold(info, trimmedBase) {
+ baseHook = hookInfo.URL
+ }
+ }
+
+ if controllerHookID != 0 {
+ _, err = r.ghcli.DeleteEntityHook(ctx, controllerHookID)
+ if err != nil {
+ return fmt.Errorf("deleting hook: %w", err)
+ }
+ return nil
+ }
+
+ if baseHook != "" {
+ return runnerErrors.NewBadRequestError("base hook found (%s) and must be deleted manually", baseHook)
+ }
+
+ return nil
+}
+
+func (r *basePoolManager) InstallHook(ctx context.Context, req *github.Hook) (params.HookInfo, error) {
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error listing hooks: %w", err)
+ }
+
+ if err := validateHookRequest(r.controllerInfo.ControllerID.String(), r.controllerInfo.WebhookURL, allHooks, req); err != nil {
+ return params.HookInfo{}, fmt.Errorf("error validating hook request: %w", err)
+ }
+
+ hook, err := r.ghcli.CreateEntityHook(ctx, req)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error creating entity hook: %w", err)
+ }
+
+ if _, err := r.ghcli.PingEntityHook(ctx, hook.GetID()); err != nil {
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to ping hook",
+ "hook_id", hook.GetID(),
+ "entity", r.entity)
+ }
+
+ return hookToParamsHookInfo(hook), nil
+}
+
+func (r *basePoolManager) InstallWebhook(ctx context.Context, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if r.controllerInfo.ControllerWebhookURL == "" {
+ return params.HookInfo{}, runnerErrors.NewBadRequestError("controller webhook url is empty")
+ }
+
+ insecureSSL := "0"
+ if param.InsecureSSL {
+ insecureSSL = "1"
+ }
+ req := &github.Hook{
+ Active: github.Ptr(true),
+ Config: &github.HookConfig{
+ ContentType: github.Ptr("json"),
+ InsecureSSL: github.Ptr(insecureSSL),
+ URL: github.Ptr(r.controllerInfo.ControllerWebhookURL),
+ Secret: github.Ptr(r.WebhookSecret()),
+ },
+ Events: []string{
+ "workflow_job",
+ },
+ }
+
+ return r.InstallHook(ctx, req)
+}
+
+func (r *basePoolManager) ValidateOwner(job params.WorkflowJob) error {
+ switch r.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ if !strings.EqualFold(job.Repository.Name, r.entity.Name) || !strings.EqualFold(job.Repository.Owner.Login, r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ case params.ForgeEntityTypeOrganization:
+ if !strings.EqualFold(job.GetOrgName(r.entity.Credentials.ForgeType), r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ case params.ForgeEntityTypeEnterprise:
+ if !strings.EqualFold(job.Enterprise.Slug, r.entity.Owner) {
+ return runnerErrors.NewBadRequestError("job not meant for this pool manager")
+ }
+ default:
+ return runnerErrors.NewBadRequestError("unknown entity type")
+ }
+
+ return nil
+}
+
+func (r *basePoolManager) GithubRunnerRegistrationToken() (string, error) {
+ tk, ghResp, err := r.ghcli.CreateEntityRegistrationToken(r.ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return "", runnerErrors.NewUnauthorizedError("error fetching token")
+ }
+ return "", fmt.Errorf("error creating runner token: %w", err)
+ }
+ return *tk.Token, nil
+}
+
+func (r *basePoolManager) FetchTools() ([]commonParams.RunnerApplicationDownload, error) {
+ tools, ghResp, err := r.ghcli.ListEntityRunnerApplicationDownloads(r.ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, runnerErrors.NewUnauthorizedError("error fetching tools")
+ }
+ return nil, fmt.Errorf("error fetching runner tools: %w", err)
+ }
+
+ ret := []commonParams.RunnerApplicationDownload{}
+ for _, tool := range tools {
+ if tool == nil {
+ continue
+ }
+ ret = append(ret, commonParams.RunnerApplicationDownload(*tool))
+ }
+ return ret, nil
+}
+
+func (r *basePoolManager) GetWebhookInfo(ctx context.Context) (params.HookInfo, error) {
+ allHooks, err := r.listHooks(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error listing hooks: %w", err)
+ }
+ trimmedBase := strings.TrimRight(r.controllerInfo.WebhookURL, "/")
+ trimmedController := strings.TrimRight(r.controllerInfo.ControllerWebhookURL, "/")
+
+ var controllerHookInfo *params.HookInfo
+ var baseHookInfo *params.HookInfo
+
+ for _, hook := range allHooks {
+ hookInfo := hookToParamsHookInfo(hook)
+ info := strings.TrimRight(hookInfo.URL, "/")
+ if strings.EqualFold(info, trimmedController) {
+ controllerHookInfo = &hookInfo
+ break
+ }
+ if strings.EqualFold(info, trimmedBase) {
+ baseHookInfo = &hookInfo
+ }
+ }
+
+ // Return the controller hook info if available.
+ if controllerHookInfo != nil {
+ return *controllerHookInfo, nil
+ }
+
+ // Fall back to base hook info if defined.
+ if baseHookInfo != nil {
+ return *baseHookInfo, nil
+ }
+
+ return params.HookInfo{}, runnerErrors.NewNotFoundError("hook not found")
+}
+
+func (r *basePoolManager) RootCABundle() (params.CertificateBundle, error) {
+ return r.entity.Credentials.RootCertificateBundle()
+}
diff --git a/runner/pool/repository.go b/runner/pool/repository.go
deleted file mode 100644
index 882206a7..00000000
--- a/runner/pool/repository.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package pool
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v48/github"
- "github.com/pkg/errors"
-)
-
-// test that we implement PoolManager
-var _ poolHelper = &repository{}
-
-func NewRepositoryPoolManager(ctx context.Context, cfg params.Repository, cfgInternal params.Internal, providers map[string]common.Provider, store dbCommon.Store) (common.PoolManager, error) {
- ghc, _, err := util.GithubClient(ctx, cfgInternal.OAuth2Token, cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return nil, errors.Wrap(err, "getting github client")
- }
-
- helper := &repository{
- cfg: cfg,
- cfgInternal: cfgInternal,
- ctx: ctx,
- ghcli: ghc,
- id: cfg.ID,
- store: store,
- }
-
- repo := &basePoolManager{
- ctx: ctx,
- store: store,
- providers: providers,
- controllerID: cfgInternal.ControllerID,
- quit: make(chan struct{}),
- done: make(chan struct{}),
- helper: helper,
- credsDetails: cfgInternal.GithubCredentialsDetails,
- }
- return repo, nil
-}
-
-var _ poolHelper = &repository{}
-
-type repository struct {
- cfg params.Repository
- cfgInternal params.Internal
- ctx context.Context
- ghcli common.GithubClient
- id string
- store dbCommon.Store
-
- mux sync.Mutex
-}
-
-func (r *repository) GetRunnerInfoFromWorkflow(job params.WorkflowJob) (params.RunnerInfo, error) {
- if err := r.ValidateOwner(job); err != nil {
- return params.RunnerInfo{}, errors.Wrap(err, "validating owner")
- }
- workflow, ghResp, err := r.ghcli.GetWorkflowJobByID(r.ctx, job.Repository.Owner.Login, job.Repository.Name, job.WorkflowJob.ID)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return params.RunnerInfo{}, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching workflow info")
- }
- return params.RunnerInfo{}, errors.Wrap(err, "fetching workflow info")
- }
-
- if workflow.RunnerName != nil {
- return params.RunnerInfo{
- Name: *workflow.RunnerName,
- Labels: workflow.Labels,
- }, nil
- }
- return params.RunnerInfo{}, fmt.Errorf("failed to find runner name from workflow")
-}
-
-func (r *repository) UpdateState(param params.UpdatePoolStateParams) error {
- r.mux.Lock()
- defer r.mux.Unlock()
-
- r.cfg.WebhookSecret = param.WebhookSecret
-
- ghc, _, err := util.GithubClient(r.ctx, r.GetGithubToken(), r.cfgInternal.GithubCredentialsDetails)
- if err != nil {
- return errors.Wrap(err, "getting github client")
- }
- r.ghcli = ghc
- return nil
-}
-
-func (r *repository) GetGithubToken() string {
- return r.cfgInternal.OAuth2Token
-}
-
-func (r *repository) GetGithubRunners() ([]*github.Runner, error) {
- opts := github.ListOptions{
- PerPage: 100,
- }
-
- var allRunners []*github.Runner
- for {
- runners, ghResp, err := r.ghcli.ListRunners(r.ctx, r.cfg.Owner, r.cfg.Name, &opts)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching runners")
- }
- return nil, errors.Wrap(err, "fetching runners")
- }
- allRunners = append(allRunners, runners.Runners...)
- if ghResp.NextPage == 0 {
- break
- }
- opts.Page = ghResp.NextPage
- }
-
- return allRunners, nil
-}
-
-func (r *repository) FetchTools() ([]*github.RunnerApplicationDownload, error) {
- r.mux.Lock()
- defer r.mux.Unlock()
- tools, ghResp, err := r.ghcli.ListRunnerApplicationDownloads(r.ctx, r.cfg.Owner, r.cfg.Name)
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return nil, errors.Wrap(runnerErrors.ErrUnauthorized, "fetching tools")
- }
- return nil, errors.Wrap(err, "fetching runner tools")
- }
-
- return tools, nil
-}
-
-func (r *repository) FetchDbInstances() ([]params.Instance, error) {
- return r.store.ListRepoInstances(r.ctx, r.id)
-}
-
-func (r *repository) RemoveGithubRunner(runnerID int64) (*github.Response, error) {
- return r.ghcli.RemoveRunner(r.ctx, r.cfg.Owner, r.cfg.Name, runnerID)
-}
-
-func (r *repository) ListPools() ([]params.Pool, error) {
- pools, err := r.store.ListRepoPools(r.ctx, r.id)
- if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
- }
- return pools, nil
-}
-
-func (r *repository) GithubURL() string {
- return fmt.Sprintf("%s/%s/%s", r.cfgInternal.GithubCredentialsDetails.BaseURL, r.cfg.Owner, r.cfg.Name)
-}
-
-func (r *repository) JwtToken() string {
- return r.cfgInternal.JWTSecret
-}
-
-func (r *repository) GetGithubRegistrationToken() (string, error) {
- tk, ghResp, err := r.ghcli.CreateRegistrationToken(r.ctx, r.cfg.Owner, r.cfg.Name)
-
- if err != nil {
- if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
- return "", errors.Wrap(runnerErrors.ErrUnauthorized, "fetching token")
- }
- return "", errors.Wrap(err, "creating runner token")
- }
- return *tk.Token, nil
-}
-
-func (r *repository) String() string {
- return fmt.Sprintf("%s/%s", r.cfg.Owner, r.cfg.Name)
-}
-
-func (r *repository) WebhookSecret() string {
- return r.cfg.WebhookSecret
-}
-
-func (r *repository) GetCallbackURL() string {
- return r.cfgInternal.InstanceCallbackURL
-}
-
-func (r *repository) GetMetadataURL() string {
- return r.cfgInternal.InstanceMetadataURL
-}
-
-func (r *repository) FindPoolByTags(labels []string) (params.Pool, error) {
- pool, err := r.store.FindRepositoryPoolByTags(r.ctx, r.id, labels)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching suitable pool")
- }
- return pool, nil
-}
-
-func (r *repository) GetPoolByID(poolID string) (params.Pool, error) {
- pool, err := r.store.GetRepositoryPool(r.ctx, r.id, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
- }
- return pool, nil
-}
-
-func (r *repository) ValidateOwner(job params.WorkflowJob) error {
- if !strings.EqualFold(job.Repository.Name, r.cfg.Name) || !strings.EqualFold(job.Repository.Owner.Login, r.cfg.Owner) {
- return runnerErrors.NewBadRequestError("job not meant for this pool manager")
- }
- return nil
-}
-
-func (r *repository) ID() string {
- return r.id
-}
diff --git a/runner/pool/stub_client.go b/runner/pool/stub_client.go
new file mode 100644
index 00000000..0afd6a52
--- /dev/null
+++ b/runner/pool/stub_client.go
@@ -0,0 +1,88 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "context"
+ "net/url"
+
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/params"
+)
+
+type stubGithubClient struct {
+ err error
+}
+
+func (s *stubGithubClient) ListEntityHooks(_ context.Context, _ *github.ListOptions) ([]*github.Hook, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityHook(_ context.Context, _ int64) (*github.Hook, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) CreateEntityHook(_ context.Context, _ *github.Hook) (*github.Hook, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) DeleteEntityHook(_ context.Context, _ int64) (*github.Response, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) PingEntityHook(_ context.Context, _ int64) (*github.Response, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) ListEntityRunners(_ context.Context, _ *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) ListEntityRunnerApplicationDownloads(_ context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) RemoveEntityRunner(_ context.Context, _ int64) error {
+ return s.err
+}
+
+func (s *stubGithubClient) CreateEntityRegistrationToken(_ context.Context) (*github.RegistrationToken, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityJITConfig(_ context.Context, _ string, _ params.Pool, _ []string) (map[string]string, *github.Runner, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetWorkflowJobByID(_ context.Context, _, _ string, _ int64) (*github.WorkflowJob, *github.Response, error) {
+ return nil, nil, s.err
+}
+
+func (s *stubGithubClient) GetEntity() params.ForgeEntity {
+ return params.ForgeEntity{}
+}
+
+func (s *stubGithubClient) GithubBaseURL() *url.URL {
+ return nil
+}
+
+func (s *stubGithubClient) RateLimit(_ context.Context) (*github.RateLimits, error) {
+ return nil, s.err
+}
+
+func (s *stubGithubClient) GetEntityRunnerGroupIDByName(_ context.Context, _ string) (int64, error) {
+ return 0, s.err
+}
diff --git a/runner/pool/util.go b/runner/pool/util.go
new file mode 100644
index 00000000..d58f90a3
--- /dev/null
+++ b/runner/pool/util.go
@@ -0,0 +1,273 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/cache"
+ dbCommon "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/database/watcher"
+ "github.com/cloudbase/garm/params"
+)
+
+func instanceInList(instanceName string, instances []commonParams.ProviderInstance) (commonParams.ProviderInstance, bool) {
+ for _, val := range instances {
+ if val.Name == instanceName {
+ return val, true
+ }
+ }
+ return commonParams.ProviderInstance{}, false
+}
+
+func controllerIDFromLabels(labels []string) string {
+ for _, lbl := range labels {
+ if strings.HasPrefix(lbl, controllerLabelPrefix) {
+ trimLength := min(len(controllerLabelPrefix)+1, len(lbl))
+ return lbl[trimLength:]
+ }
+ }
+ return ""
+}
+
+func labelsFromRunner(runner forgeRunner) []string {
+ if runner.Labels == nil {
+ return []string{}
+ }
+
+ var labels []string
+ for _, val := range runner.Labels {
+ labels = append(labels, val.Name)
+ }
+ return labels
+}
+
+// isManagedRunner returns true if labels indicate the runner belongs to a pool
+// this manager is responsible for.
+func isManagedRunner(labels []string, controllerID string) bool {
+ runnerControllerID := controllerIDFromLabels(labels)
+ return runnerControllerID == controllerID
+}
+
+func composeWatcherFilters(entity params.ForgeEntity) dbCommon.PayloadFilterFunc {
+ // We want to watch for changes in either the controller or the
+ // entity itself.
+ return watcher.WithAny(
+ watcher.WithAll(
+ // Updates to the controller
+ watcher.WithEntityTypeFilter(dbCommon.ControllerEntityType),
+ watcher.WithOperationTypeFilter(dbCommon.UpdateOperation),
+ ),
+ // Any operation on the entity we're managing the pool for.
+ watcher.WithEntityFilter(entity),
+ // Watch for changes to the github credentials
+ watcher.WithForgeCredentialsFilter(entity.Credentials),
+ )
+}
+
+func (r *basePoolManager) waitForToolsOrCancel() (hasTools, stopped bool) {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+ select {
+ case <-ticker.C:
+ if _, err := cache.GetGithubToolsCache(r.entity.ID); err != nil {
+ return false, false
+ }
+ return true, false
+ case <-r.quit:
+ return false, true
+ case <-r.ctx.Done():
+ return false, true
+ }
+}
+
+func validateHookRequest(controllerID, baseURL string, allHooks []*github.Hook, req *github.Hook) error {
+ parsed, err := url.Parse(baseURL)
+ if err != nil {
+ return fmt.Errorf("error parsing webhook url: %w", err)
+ }
+
+ partialMatches := []string{}
+ for _, hook := range allHooks {
+ hookURL := strings.ToLower(hook.Config.GetURL())
+ if hookURL == "" {
+ continue
+ }
+
+ if hook.Config.GetURL() == req.Config.GetURL() {
+ return runnerErrors.NewConflictError("hook already installed")
+ } else if strings.Contains(hookURL, controllerID) || strings.Contains(hookURL, parsed.Hostname()) {
+ partialMatches = append(partialMatches, hook.Config.GetURL())
+ }
+ }
+
+ if len(partialMatches) > 0 {
+ return runnerErrors.NewConflictError("a webhook containing the controller ID or hostname of this contreoller is already installed on this repository")
+ }
+
+ return nil
+}
+
+func hookToParamsHookInfo(hook *github.Hook) params.HookInfo {
+ hookURL := hook.Config.GetURL()
+
+ insecureSSLConfig := hook.Config.GetInsecureSSL()
+ insecureSSL := insecureSSLConfig == "1"
+
+ return params.HookInfo{
+ ID: *hook.ID,
+ URL: hookURL,
+ Events: hook.Events,
+ Active: *hook.Active,
+ InsecureSSL: insecureSSL,
+ }
+}
+
+func (r *basePoolManager) listHooks(ctx context.Context) ([]*github.Hook, error) {
+ opts := github.ListOptions{
+ PerPage: 100,
+ }
+ var allHooks []*github.Hook
+ for {
+ hooks, ghResp, err := r.ghcli.ListEntityHooks(ctx, &opts)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusNotFound {
+ return nil, runnerErrors.NewBadRequestError("repository not found or your PAT does not have access to manage webhooks")
+ }
+ return nil, fmt.Errorf("error fetching hooks: %w", err)
+ }
+ allHooks = append(allHooks, hooks...)
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+ return allHooks, nil
+}
+
+func (r *basePoolManager) listRunnersWithPagination() ([]forgeRunner, error) {
+ opts := github.ListRunnersOptions{
+ ListOptions: github.ListOptions{
+ PerPage: 100,
+ },
+ }
+ var allRunners []*github.Runner
+
+ // Paginating like this can lead to a situation where if we have many pages of runners,
+ // while we paginate, a particular runner can move from page n to page n-1 while we move
+ // from page n-1 to page n. In situations such as that, we end up with a list of runners
+ // that does not contain the runner that swapped pages while we were paginating.
+ // Sadly, the GitHub API does not allow listing more than 100 runners per page.
+ for {
+ runners, ghResp, err := r.ghcli.ListEntityRunners(r.ctx, &opts)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, runnerErrors.NewUnauthorizedError("error fetching runners")
+ }
+ return nil, fmt.Errorf("error fetching runners: %w", err)
+ }
+ allRunners = append(allRunners, runners.Runners...)
+ if ghResp.NextPage == 0 {
+ break
+ }
+ opts.Page = ghResp.NextPage
+ }
+
+ ret := make([]forgeRunner, len(allRunners))
+ for idx, val := range allRunners {
+ ret[idx] = forgeRunner{
+ ID: val.GetID(),
+ Name: val.GetName(),
+ Status: val.GetStatus(),
+ Labels: make([]RunnerLabels, len(val.Labels)),
+ }
+ for labelIdx, label := range val.Labels {
+ ret[idx].Labels[labelIdx] = RunnerLabels{
+ Name: label.GetName(),
+ Type: label.GetType(),
+ ID: label.GetID(),
+ }
+ }
+ }
+
+ return ret, nil
+}
+
+func (r *basePoolManager) listRunnersWithScaleSetAPI() ([]forgeRunner, error) {
+ if r.scaleSetClient == nil {
+ return nil, fmt.Errorf("scaleset client not initialized")
+ }
+
+ runners, err := r.scaleSetClient.ListAllRunners(r.ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list runners through scaleset API: %w", err)
+ }
+
+ ret := []forgeRunner{}
+ for _, runner := range runners.RunnerReferences {
+ if runner.RunnerScaleSetID != 0 {
+ // skip scale set runners.
+ continue
+ }
+ run := forgeRunner{
+ Name: runner.Name,
+ ID: runner.ID,
+ Status: string(runner.GetStatus()),
+ Labels: make([]RunnerLabels, len(runner.Labels)),
+ }
+ for labelIDX, label := range runner.Labels {
+ run.Labels[labelIDX] = RunnerLabels{
+ Name: label.Name,
+ Type: label.Type,
+ }
+ }
+ ret = append(ret, run)
+ }
+ return ret, nil
+}
+
+func (r *basePoolManager) GetGithubRunners() ([]forgeRunner, error) {
+ // Gitea has no scale sets API
+ if r.scaleSetClient == nil {
+ return r.listRunnersWithPagination()
+ }
+
+ // try the scale sets API for github
+ runners, err := r.listRunnersWithScaleSetAPI()
+ if err != nil {
+ slog.WarnContext(r.ctx, "failed to list runners via scaleset API; falling back to pagination", "error", err)
+ return r.listRunnersWithPagination()
+ }
+
+ entityInstances := cache.GetEntityInstances(r.entity.ID)
+ if len(entityInstances) > 0 && len(runners) == 0 {
+ // I have trust issues in the undocumented API. We seem to have runners for this
+ // entity, but the scaleset API returned nothing and no error. Fall back to pagination.
+ slog.DebugContext(r.ctx, "the scaleset api returned nothing, but we seem to have runners in the db; falling back to paginated API runner list")
+ return r.listRunnersWithPagination()
+ }
+ slog.DebugContext(r.ctx, "Scaleset API runner list succeeded", "runners", runners)
+ return runners, nil
+}
diff --git a/runner/pool/util_test.go b/runner/pool/util_test.go
new file mode 100644
index 00000000..67d31f76
--- /dev/null
+++ b/runner/pool/util_test.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
import (
	"errors"
	"sync"
	"testing"

	runnerErrors "github.com/cloudbase/garm-provider-common/errors"
	"github.com/cloudbase/garm/params"
)
+
+func TestPoolRoundRobinRollsOver(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ pool, err := p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+
+ pool, err = p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+
+ pool, err = p.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+}
+
+func TestPoolRoundRobinEmptyPoolErrorsOut(t *testing.T) {
+ p := &poolRoundRobin{}
+
+ _, err := p.Next()
+ if err == nil {
+ t.Fatalf("expected error, got nil")
+ }
+ if err != runnerErrors.ErrNoPoolsAvailable {
+ t.Fatalf("expected ErrNoPoolsAvailable, got %s", err)
+ }
+}
+
+func TestPoolRoundRobinLen(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ if p.Len() != 2 {
+ t.Fatalf("expected 2, got %d", p.Len())
+ }
+}
+
+func TestPoolRoundRobinReset(t *testing.T) {
+ p := &poolRoundRobin{
+ pools: []params.Pool{
+ {
+ ID: "1",
+ },
+ {
+ ID: "2",
+ },
+ },
+ }
+
+ p.Next()
+ p.Reset()
+ if p.next != 0 {
+ t.Fatalf("expected 0, got %d", p.next)
+ }
+}
+
+func TestPoolsForTagsPackGet(t *testing.T) {
+ p := &poolsForTags{
+ poolCacheType: params.PoolBalancerTypePack,
+ }
+
+ pools := []params.Pool{
+ {
+ ID: "1",
+ Priority: 0,
+ },
+ {
+ ID: "2",
+ Priority: 100,
+ },
+ }
+ _ = p.Add([]string{"key"}, pools)
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache.Len() != 2 {
+ t.Fatalf("expected 2, got %d", cache.Len())
+ }
+
+ poolRR, ok := cache.(*poolRoundRobin)
+ if !ok {
+ t.Fatalf("expected poolRoundRobin, got %v", cache)
+ }
+ if poolRR.next != 0 {
+ t.Fatalf("expected 0, got %d", poolRR.next)
+ }
+ pool, err := poolRR.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+
+ if poolRR.next != 1 {
+ t.Fatalf("expected 1, got %d", poolRR.next)
+ }
+ // Getting the pool cache again should reset next
+ cache, ok = p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ poolRR, ok = cache.(*poolRoundRobin)
+ if !ok {
+ t.Fatalf("expected poolRoundRobin, got %v", cache)
+ }
+ if poolRR.next != 0 {
+ t.Fatalf("expected 0, got %d", poolRR.next)
+ }
+}
+
+func TestPoolsForTagsRoundRobinGet(t *testing.T) {
+ p := &poolsForTags{
+ poolCacheType: params.PoolBalancerTypeRoundRobin,
+ }
+
+ pools := []params.Pool{
+ {
+ ID: "1",
+ Priority: 0,
+ },
+ {
+ ID: "2",
+ Priority: 100,
+ },
+ }
+ _ = p.Add([]string{"key"}, pools)
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache.Len() != 2 {
+ t.Fatalf("expected 2, got %d", cache.Len())
+ }
+
+ pool, err := cache.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "2" {
+ t.Fatalf("expected pool 2, got %s", pool.ID)
+ }
+ // Getting the pool cache again should not reset next, and
+ // should return the next pool.
+ cache, ok = p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ pool, err = cache.Next()
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if pool.ID != "1" {
+ t.Fatalf("expected pool 1, got %s", pool.ID)
+ }
+}
+
+func TestPoolsForTagsNoPoolsForTag(t *testing.T) {
+ p := &poolsForTags{
+ pools: sync.Map{},
+ }
+
+ _, ok := p.Get([]string{"key"})
+ if ok {
+ t.Fatalf("expected false, got true")
+ }
+}
+
+func TestPoolsForTagsBalancerTypePack(t *testing.T) {
+ p := &poolsForTags{
+ pools: sync.Map{},
+ poolCacheType: params.PoolBalancerTypePack,
+ }
+
+ poolCache := &poolRoundRobin{}
+ p.pools.Store("key", poolCache)
+
+ cache, ok := p.Get([]string{"key"})
+ if !ok {
+ t.Fatalf("expected true, got false")
+ }
+ if cache != poolCache {
+ t.Fatalf("expected poolCache, got %v", cache)
+ }
+ if poolCache.next != 0 {
+ t.Fatalf("expected 0, got %d", poolCache.next)
+ }
+}
diff --git a/runner/pool/watcher.go b/runner/pool/watcher.go
new file mode 100644
index 00000000..999b52c6
--- /dev/null
+++ b/runner/pool/watcher.go
@@ -0,0 +1,183 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package pool
+
+import (
+ "log/slog"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/database/common"
+ "github.com/cloudbase/garm/params"
+ runnerCommon "github.com/cloudbase/garm/runner/common"
+ ghClient "github.com/cloudbase/garm/util/github"
+)
+
+// entityGetter is implemented by all github entities (repositories, organizations and enterprises)
+type entityGetter interface {
+ GetEntity() (params.ForgeEntity, error)
+}
+
+func (r *basePoolManager) handleControllerUpdateEvent(controllerInfo params.ControllerInfo) {
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ slog.DebugContext(r.ctx, "updating controller info", "controller_info", controllerInfo)
+ r.controllerInfo = controllerInfo
+}
+
+func (r *basePoolManager) getClientOrStub() runnerCommon.GithubClient {
+ var err error
+ var ghc runnerCommon.GithubClient
+ ghc, err = ghClient.Client(r.ctx, r.entity)
+ if err != nil {
+ slog.WarnContext(r.ctx, "failed to create github client", "error", err)
+ ghc = &stubGithubClient{
+ err: runnerErrors.NewUnauthorizedError("failed to create github client; please update credentials"),
+ }
+ }
+ return ghc
+}
+
// handleEntityUpdate reacts to watcher events for the entity this pool
// manager serves. Delete events close the db consumer; update events replace
// the cached entity and, when the credentials changed, refresh the watcher
// filters, the github client and (via defer, after the lock is released)
// the cached tools. Events for other entities or operations are ignored.
func (r *basePoolManager) handleEntityUpdate(entity params.ForgeEntity, operation common.OperationType) {
	slog.DebugContext(r.ctx, "received entity operation", "entity", entity.ID, "operation", operation)
	// Guard against stale events that target a different entity.
	if r.entity.ID != entity.ID {
		slog.WarnContext(r.ctx, "entity ID mismatch; stale event? refusing to update", "entity", entity.ID)
		return
	}

	if operation == common.DeleteOperation {
		slog.InfoContext(r.ctx, "entity deleted; closing db consumer", "entity", entity.ID)
		r.consumer.Close()
		return
	}

	// Only update operations are handled past this point.
	if operation != common.UpdateOperation {
		slog.DebugContext(r.ctx, "operation not update; ignoring", "entity", entity.ID, "operation", operation)
		return
	}

	// Detect a credentials swap before r.entity is overwritten below.
	credentialsUpdate := r.entity.Credentials.GetID() != entity.Credentials.GetID()
	// Deferred so the tools refresh runs after the mutex below has been
	// released — presumably updateTools needs the lock itself; confirm.
	defer func() {
		slog.DebugContext(r.ctx, "deferred tools update", "credentials_update", credentialsUpdate)
		if !credentialsUpdate {
			return
		}
		slog.DebugContext(r.ctx, "updating tools", "entity", entity.ID)
		if err := r.updateTools(); err != nil {
			slog.ErrorContext(r.ctx, "failed to update tools", "error", err)
		}
	}()

	slog.DebugContext(r.ctx, "updating entity", "entity", entity.ID)
	r.mux.Lock()
	slog.DebugContext(r.ctx, "lock acquired", "entity", entity.ID)

	r.entity = entity
	if credentialsUpdate {
		// Re-point the db consumer filters at the updated entity and swap
		// the github client (or a stub when client creation fails).
		if r.consumer != nil {
			filters := composeWatcherFilters(r.entity)
			r.consumer.SetFilters(filters)
		}
		slog.DebugContext(r.ctx, "credentials update", "entity", entity.ID)
		r.ghcli = r.getClientOrStub()
	}
	r.mux.Unlock()
	slog.DebugContext(r.ctx, "lock released", "entity", entity.ID)
}
+
// handleCredentialsUpdate reacts to an update of the credentials object that
// is currently attached to this entity: it swaps the github client and, via
// defer (after the lock is released), refreshes the cached tools. Events for
// credentials not attached to this entity are ignored as stale.
func (r *basePoolManager) handleCredentialsUpdate(credentials params.ForgeCredentials) {
	// when we switch credentials on an entity (like from one app to another or from an app
	// to a PAT), we may still get events for the previous credentials as the channel is buffered.
	// The watcher will watch for changes to the entity itself, which includes events that
	// change the credentials name on the entity, but we also watch for changes to the credentials
	// themselves, like an updated PAT token set on existing credentials entity.
	// The handleCredentialsUpdate function handles situations where we have changes on the
	// credentials entity itself, not on the entity that the credentials are set on.
	// For example, we may have a credentials entity called org_pat set on a repo called
	// test-repo. This function would handle situations where "org_pat" is updated.
	// If "test-repo" is updated with new credentials, that event is handled above in
	// handleEntityUpdate.
	shouldUpdateTools := r.entity.Credentials.GetID() == credentials.GetID()
	// Deferred so the tools refresh runs after the mutex below has been
	// released — presumably updateTools needs the lock itself; confirm.
	defer func() {
		if !shouldUpdateTools {
			return
		}
		slog.DebugContext(r.ctx, "deferred tools update", "credentials_id", credentials.GetID())
		if err := r.updateTools(); err != nil {
			slog.ErrorContext(r.ctx, "failed to update tools", "error", err)
		}
	}()

	r.mux.Lock()
	if !shouldUpdateTools {
		// Stale event for credentials that are no longer attached to this
		// entity; release the lock and bail out.
		slog.InfoContext(r.ctx, "credential ID mismatch; stale event?", "credentials_id", credentials.GetID())
		r.mux.Unlock()
		return
	}

	slog.DebugContext(r.ctx, "updating credentials", "credentials_id", credentials.GetID())
	r.entity.Credentials = credentials
	r.ghcli = r.getClientOrStub()
	r.mux.Unlock()
}
+
+func (r *basePoolManager) handleWatcherEvent(event common.ChangePayload) {
+ dbEntityType := common.DatabaseEntityType(r.entity.EntityType)
+ switch event.EntityType {
+ case common.GithubCredentialsEntityType, common.GiteaCredentialsEntityType:
+ credentials, ok := event.Payload.(params.ForgeCredentials)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to github credentials")
+ return
+ }
+ r.handleCredentialsUpdate(credentials)
+ case common.ControllerEntityType:
+ controllerInfo, ok := event.Payload.(params.ControllerInfo)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to controller info")
+ return
+ }
+ r.handleControllerUpdateEvent(controllerInfo)
+ case dbEntityType:
+ entity, ok := event.Payload.(entityGetter)
+ if !ok {
+ slog.ErrorContext(r.ctx, "failed to cast payload to entity")
+ return
+ }
+ entityInfo, err := entity.GetEntity()
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to get entity", "error", err)
+ return
+ }
+ r.handleEntityUpdate(entityInfo, event.Operation)
+ }
+}
+
// runWatcher consumes events from the db watcher until the pool manager is
// stopped (quit channel or context cancellation) or the watch channel is
// closed. Each event is dispatched in its own goroutine so a slow handler
// cannot block the consumer channel.
func (r *basePoolManager) runWatcher() {
	// Make sure the consumer is closed whichever way the loop exits.
	defer r.consumer.Close()
	for {
		select {
		case <-r.quit:
			// Pool manager was asked to stop.
			return
		case <-r.ctx.Done():
			// Parent context was canceled.
			return
		case event, ok := <-r.consumer.Watch():
			if !ok {
				// Watch channel closed; nothing left to consume.
				return
			}
			go r.handleWatcherEvent(event)
		}
	}
}
diff --git a/runner/pools.go b/runner/pools.go
index f6561b68..ffd3b9c8 100644
--- a/runner/pools.go
+++ b/runner/pools.go
@@ -16,13 +16,12 @@ package runner
import (
"context"
+ "errors"
"fmt"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
-
- "github.com/pkg/errors"
)
func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) {
@@ -32,7 +31,7 @@ func (r *Runner) ListAllPools(ctx context.Context) ([]params.Pool, error) {
pools, err := r.store.ListAllPools(ctx)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -44,7 +43,7 @@ func (r *Runner) GetPoolByID(ctx context.Context, poolID string) (params.Pool, e
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
return pool, nil
}
@@ -57,7 +56,7 @@ func (r *Runner) DeletePoolByID(ctx context.Context, poolID string) error {
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return errors.Wrap(err, "fetching pool")
+ return fmt.Errorf("error fetching pool: %w", err)
}
return nil
}
@@ -67,7 +66,7 @@ func (r *Runner) DeletePoolByID(ctx context.Context, poolID string) error {
}
if err := r.store.DeletePoolByID(ctx, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -79,7 +78,7 @@ func (r *Runner) UpdatePoolByID(ctx context.Context, poolID string, param params
pool, err := r.store.GetPoolByID(ctx, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -100,28 +99,26 @@ func (r *Runner) UpdatePoolByID(ctx context.Context, poolID string, param params
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- if param.Tags != nil && len(param.Tags) > 0 {
- newTags, err := r.processTags(string(pool.OSArch), pool.OSType, param.Tags)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "processing tags")
- }
- param.Tags = newTags
- }
-
- var newPool params.Pool
-
- if pool.RepoID != "" {
- newPool, err = r.store.UpdateRepositoryPool(ctx, pool.RepoID, poolID, param)
- } else if pool.OrgID != "" {
- newPool, err = r.store.UpdateOrganizationPool(ctx, pool.OrgID, poolID, param)
- } else if pool.EnterpriseID != "" {
- newPool, err = r.store.UpdateEnterprisePool(ctx, pool.EnterpriseID, poolID, param)
- } else {
- return params.Pool{}, fmt.Errorf("pool not bound to a repo, org or enterprise")
- }
-
+ entity, err := pool.GetEntity()
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
+
+func (r *Runner) ListAllJobs(ctx context.Context) ([]params.Job, error) {
+ if !auth.IsAdmin(ctx) {
+ return []params.Job{}, runnerErrors.ErrUnauthorized
+ }
+
+ jobs, err := r.store.ListAllJobs(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching jobs: %w", err)
+ }
+ return jobs, nil
+}
diff --git a/runner/pools_test.go b/runner/pools_test.go
index c19a7858..2a2aea5d 100644
--- a/runner/pools_test.go
+++ b/runner/pools_test.go
@@ -19,15 +19,16 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
garmTesting "github.com/cloudbase/garm/internal/testing"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
- "github.com/stretchr/testify/suite"
)
type PoolTestFixtures struct {
@@ -44,10 +45,15 @@ type PoolTestSuite struct {
suite.Suite
Fixtures *PoolTestFixtures
Runner *Runner
+
+ adminCtx context.Context
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
}
func (s *PoolTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
+ adminCtx := auth.GetAdminContext(context.Background())
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
@@ -56,18 +62,28 @@ func (s *PoolTestSuite) SetupTest() {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ s.adminCtx = garmTesting.ImpersonateAdminContext(adminCtx, db, s.T())
+
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(s.adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(s.adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(s.adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+
// create an organization for testing purposes
- org, err := db.CreateOrganization(context.Background(), "test-org", "test-creds", "test-webhookSecret")
+ org, err := db.CreateOrganization(s.adminCtx, "test-org", s.testCreds, "test-webhookSecret", params.PoolBalancerTypeRoundRobin)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create org: %s", err))
}
// create some pool objects in the database, for testing purposes
+ entity := params.ForgeEntity{
+ ID: org.ID,
+ EntityType: params.ForgeEntityTypeOrganization,
+ }
orgPools := []params.Pool{}
for i := 1; i <= 3; i++ {
- pool, err := db.CreateOrganizationPool(
- context.Background(),
- org.ID,
+ pool, err := db.CreateEntityPool(
+ adminCtx,
+ entity,
params.CreatePoolParams{
ProviderName: "test-provider",
MaxRunners: 4,
@@ -75,7 +91,7 @@ func (s *PoolTestSuite) SetupTest() {
Image: fmt.Sprintf("test-image-%d", i),
Flavor: "test-flavor",
OSType: "linux",
- Tags: []string{"self-hosted", "amd64", "linux"},
+ Tags: []string{"amd64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
)
@@ -97,6 +113,9 @@ func (s *PoolTestSuite) SetupTest() {
MinIdleRunners: &minIdleRunners,
Image: "test-images-updated",
Flavor: "test-flavor-updated",
+ Tags: []string{
+ "amd64-linux-runner",
+ },
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
@@ -107,10 +126,9 @@ func (s *PoolTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
- providers: fixtures.Providers,
- credentials: fixtures.Credentials,
- store: fixtures.Store,
- ctx: fixtures.AdminContext,
+ providers: fixtures.Providers,
+ store: fixtures.Store,
+ ctx: fixtures.AdminContext,
}
s.Runner = runner
}
@@ -151,7 +169,7 @@ func (s *PoolTestSuite) TestGetPoolByIDNotFound() {
s.Require().Nil(err)
_, err = s.Runner.GetPoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: fetching pool by ID: not found", err.Error())
+ s.Require().Equal("error fetching pool: error fetching pool by ID: not found", err.Error())
}
func (s *PoolTestSuite) TestDeletePoolByID() {
@@ -160,7 +178,7 @@ func (s *PoolTestSuite) TestDeletePoolByID() {
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetPoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool by ID: not found", err.Error())
+ s.Require().Equal("error fetching pool by ID: not found", err.Error())
}
func (s *PoolTestSuite) TestDeletePoolByIDErrUnauthorized() {
@@ -181,6 +199,16 @@ func (s *PoolTestSuite) TestDeletePoolByIDRunnersFailed() {
s.Require().Equal(runnerErrors.NewBadRequestError("pool has runners"), err)
}
+func (s *PoolTestSuite) TestUpdatePoolByID() {
+ pool, err := s.Runner.UpdatePoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID, s.Fixtures.UpdatePoolParams)
+
+ s.Require().Nil(err)
+ s.Require().Equal(*s.Fixtures.UpdatePoolParams.MaxRunners, pool.MaxRunners)
+ s.Require().Equal(*s.Fixtures.UpdatePoolParams.MinIdleRunners, pool.MinIdleRunners)
+ s.Require().Equal(s.Fixtures.UpdatePoolParams.Image, pool.Image)
+ s.Require().Equal(s.Fixtures.UpdatePoolParams.Flavor, pool.Flavor)
+}
+
func (s *PoolTestSuite) TestUpdatePoolByIDErrUnauthorized() {
_, err := s.Runner.UpdatePoolByID(context.Background(), "dummy-pool-id", s.Fixtures.UpdatePoolParams)
@@ -192,12 +220,12 @@ func (s *PoolTestSuite) TestTestUpdatePoolByIDInvalidPoolID() {
_, err := s.Runner.UpdatePoolByID(s.Fixtures.AdminContext, "dummy-pool-id", s.Fixtures.UpdatePoolParams)
s.Require().NotNil(err)
- s.Require().Equal("fetching pool: fetching pool by ID: parsing id: invalid request", err.Error())
+ s.Require().Equal("error fetching pool: error fetching pool by ID: error parsing id: invalid request", err.Error())
}
func (s *PoolTestSuite) TestTestUpdatePoolByIDRunnerBootstrapTimeoutFailed() {
// this is already created in `SetupTest()`
- var RunnerBootstrapTimeout uint = 0
+ var RunnerBootstrapTimeout uint // default is 0
s.Fixtures.UpdatePoolParams.RunnerBootstrapTimeout = &RunnerBootstrapTimeout
_, err := s.Runner.UpdatePoolByID(s.Fixtures.AdminContext, s.Fixtures.Pools[0].ID, s.Fixtures.UpdatePoolParams)
diff --git a/runner/providers/common/common.go b/runner/providers/common/common.go
index dfa49f0d..f1a5a66d 100644
--- a/runner/providers/common/common.go
+++ b/runner/providers/common/common.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Cloudbase Solutions SRL
+// Copyright 2025 Cloudbase Solutions SRL
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
@@ -14,50 +14,24 @@
package common
-type InstanceStatus string
-type RunnerStatus string
-
-const (
- InstanceRunning InstanceStatus = "running"
- InstanceStopped InstanceStatus = "stopped"
- InstanceError InstanceStatus = "error"
- InstancePendingDelete InstanceStatus = "pending_delete"
- InstanceDeleting InstanceStatus = "deleting"
- InstancePendingCreate InstanceStatus = "pending_create"
- InstanceCreating InstanceStatus = "creating"
- InstanceStatusUnknown InstanceStatus = "unknown"
-
- RunnerIdle RunnerStatus = "idle"
- RunnerPending RunnerStatus = "pending"
- RunnerTerminated RunnerStatus = "terminated"
- RunnerInstalling RunnerStatus = "installing"
- RunnerFailed RunnerStatus = "failed"
- RunnerActive RunnerStatus = "active"
+import (
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/runner/providers/util"
)
-// IsValidStatus checks if the given status is valid.
-func IsValidStatus(status InstanceStatus) bool {
- switch status {
- case InstanceRunning, InstanceError, InstancePendingCreate,
- InstancePendingDelete, InstanceStatusUnknown, InstanceStopped,
- InstanceCreating, InstanceDeleting:
-
- return true
- default:
- return false
+func ValidateResult(inst commonParams.ProviderInstance) error {
+ if inst.ProviderID == "" {
+ return garmErrors.NewProviderError("missing provider ID")
}
-}
-// IsProviderValidStatus checks if the given status is valid for the provider.
-// A provider should only return a status indicating that the instance is in a
-// lifecycle state that it can influence. The sole purpose of a provider is to
-// manage the lifecycle of an instance. Statuses that indicate an instance should
-// be created or removed, will be set by the controller.
-func IsValidProviderStatus(status InstanceStatus) bool {
- switch status {
- case InstanceRunning, InstanceError, InstanceStopped:
- return true
- default:
- return false
+ if inst.Name == "" {
+ return garmErrors.NewProviderError("missing instance name")
}
+
+ if !util.IsValidProviderStatus(inst.Status) {
+ return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
+ }
+
+ return nil
}
diff --git a/runner/providers/external/execution/commands.go b/runner/providers/external/execution/commands.go
deleted file mode 100644
index 4d718a65..00000000
--- a/runner/providers/external/execution/commands.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package execution
-
-type ExecutionCommand string
-
-const (
- CreateInstanceCommand ExecutionCommand = "CreateInstance"
- DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
- GetInstanceCommand ExecutionCommand = "GetInstance"
- ListInstancesCommand ExecutionCommand = "ListInstances"
- StartInstanceCommand ExecutionCommand = "StartInstance"
- StopInstanceCommand ExecutionCommand = "StopInstance"
- RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
-)
diff --git a/runner/providers/external/execution/execution.go b/runner/providers/external/execution/execution.go
deleted file mode 100644
index 19fb9109..00000000
--- a/runner/providers/external/execution/execution.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package execution
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "os"
-
- "github.com/cloudbase/garm/params"
-
- "github.com/mattn/go-isatty"
-)
-
-func GetEnvironment() (Environment, error) {
- env := Environment{
- Command: ExecutionCommand(os.Getenv("GARM_COMMAND")),
- ControllerID: os.Getenv("GARM_CONTROLLER_ID"),
- PoolID: os.Getenv("GARM_POOL_ID"),
- ProviderConfigFile: os.Getenv("GARM_PROVIDER_CONFIG_FILE"),
- InstanceID: os.Getenv("GARM_INSTANCE_ID"),
- }
-
- // If this is a CreateInstance command, we need to get the bootstrap params
- // from stdin
- if env.Command == CreateInstanceCommand {
- if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
- return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
- }
-
- var data bytes.Buffer
- if _, err := io.Copy(&data, os.Stdin); err != nil {
- return Environment{}, fmt.Errorf("failed to copy bootstrap params")
- }
-
- if data.Len() == 0 {
- return Environment{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
- }
-
- var bootstrapParams params.BootstrapInstance
- if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
- return Environment{}, fmt.Errorf("failed to decode instance params: %w", err)
- }
- env.BootstrapParams = bootstrapParams
- }
-
- if err := env.Validate(); err != nil {
- return Environment{}, fmt.Errorf("failed to validate execution environment: %w", err)
- }
-
- return env, nil
-}
-
-type Environment struct {
- Command ExecutionCommand
- ControllerID string
- PoolID string
- ProviderConfigFile string
- InstanceID string
- BootstrapParams params.BootstrapInstance
-}
-
-func (e Environment) Validate() error {
- if e.Command == "" {
- return fmt.Errorf("missing GARM_COMMAND")
- }
-
- if e.ProviderConfigFile == "" {
- return fmt.Errorf("missing GARM_PROVIDER_CONFIG_FILE")
- }
-
- if _, err := os.Lstat(e.ProviderConfigFile); err != nil {
- return fmt.Errorf("error accessing config file: %w", err)
- }
-
- if e.ControllerID == "" {
- return fmt.Errorf("missing GARM_CONTROLLER_ID")
- }
-
- switch e.Command {
- case CreateInstanceCommand:
- if e.BootstrapParams.Name == "" {
- return fmt.Errorf("missing bootstrap params")
- }
- if e.ControllerID == "" {
- return fmt.Errorf("missing controller ID")
- }
- if e.PoolID == "" {
- return fmt.Errorf("missing pool ID")
- }
- case DeleteInstanceCommand, GetInstanceCommand,
- StartInstanceCommand, StopInstanceCommand:
- if e.InstanceID == "" {
- return fmt.Errorf("missing instance ID")
- }
- case ListInstancesCommand:
- if e.PoolID == "" {
- return fmt.Errorf("missing pool ID")
- }
- case RemoveAllInstancesCommand:
- if e.ControllerID == "" {
- return fmt.Errorf("missing controller ID")
- }
- default:
- return fmt.Errorf("unknown GARM_COMMAND: %s", e.Command)
- }
- return nil
-}
-
-func Run(ctx context.Context, provider ExternalProvider, env Environment) (string, error) {
- var ret string
- switch env.Command {
- case CreateInstanceCommand:
- instance, err := provider.CreateInstance(ctx, env.BootstrapParams)
- if err != nil {
- return "", fmt.Errorf("failed to create instance in provider: %w", err)
- }
-
- asJs, err := json.Marshal(instance)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case GetInstanceCommand:
- instance, err := provider.GetInstance(ctx, env.InstanceID)
- if err != nil {
- return "", fmt.Errorf("failed to get instance from provider: %w", err)
- }
- asJs, err := json.Marshal(instance)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case ListInstancesCommand:
- instances, err := provider.ListInstances(ctx, env.PoolID)
- if err != nil {
- return "", fmt.Errorf("failed to list instances from provider: %w", err)
- }
- asJs, err := json.Marshal(instances)
- if err != nil {
- return "", fmt.Errorf("failed to marshal response: %w", err)
- }
- ret = string(asJs)
- case DeleteInstanceCommand:
- if err := provider.DeleteInstance(ctx, env.InstanceID); err != nil {
- return "", fmt.Errorf("failed to delete instance from provider: %w", err)
- }
- case RemoveAllInstancesCommand:
- if err := provider.RemoveAllInstances(ctx); err != nil {
- return "", fmt.Errorf("failed to destroy environment: %w", err)
- }
- case StartInstanceCommand:
- if err := provider.Start(ctx, env.InstanceID); err != nil {
- return "", fmt.Errorf("failed to start instance: %w", err)
- }
- case StopInstanceCommand:
- if err := provider.Stop(ctx, env.InstanceID, true); err != nil {
- return "", fmt.Errorf("failed to stop instance: %w", err)
- }
- default:
- return "", fmt.Errorf("invalid command: %s", env.Command)
- }
- return ret, nil
-}
diff --git a/runner/providers/external/execution/exit_codes.go b/runner/providers/external/execution/exit_codes.go
deleted file mode 100644
index 40aefb37..00000000
--- a/runner/providers/external/execution/exit_codes.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package execution
-
-import (
- "errors"
-
- gErrors "github.com/cloudbase/garm/errors"
-)
-
-const (
- // ExitCodeNotFound is an exit code that indicates a Not Found error
- ExitCodeNotFound int = 30
- // ExitCodeDuplicate is an exit code that indicates a duplicate error
- ExitCodeDuplicate int = 31
-)
-
-func ResolveErrorToExitCode(err error) int {
- if err != nil {
- if errors.Is(err, gErrors.ErrNotFound) {
- return ExitCodeNotFound
- } else if errors.Is(err, gErrors.ErrDuplicateEntity) {
- return ExitCodeDuplicate
- }
- return 1
- }
- return 0
-}
diff --git a/runner/providers/external/execution/interface.go b/runner/providers/external/execution/interface.go
deleted file mode 100644
index 7c8cc90f..00000000
--- a/runner/providers/external/execution/interface.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package execution
-
-import (
- "context"
-
- "github.com/cloudbase/garm/params"
-)
-
-// ExternalProvider defines an interface that external providers need to implement.
-// This is very similar to the common.Provider interface, and was redefined here to
-// decouple it, in case it may diverge from native providers.
-type ExternalProvider interface {
- // CreateInstance creates a new compute instance in the provider.
- CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.Instance, error)
- // Delete instance will delete the instance in a provider.
- DeleteInstance(ctx context.Context, instance string) error
- // GetInstance will return details about one instance.
- GetInstance(ctx context.Context, instance string) (params.Instance, error)
- // ListInstances will list all instances for a provider.
- ListInstances(ctx context.Context, poolID string) ([]params.Instance, error)
- // RemoveAllInstances will remove all instances created by this provider.
- RemoveAllInstances(ctx context.Context) error
- // Stop shuts down the instance.
- Stop(ctx context.Context, instance string, force bool) error
- // Start boots up an instance.
- Start(ctx context.Context, instance string) error
-}
diff --git a/runner/providers/external/external.go b/runner/providers/external/external.go
index 1c2ec1f2..46e3dd47 100644
--- a/runner/providers/external/external.go
+++ b/runner/providers/external/external.go
@@ -1,225 +1,37 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
package external
import (
"context"
- "encoding/json"
"fmt"
- "log"
- "os/exec"
"github.com/cloudbase/garm/config"
- garmErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
- providerCommon "github.com/cloudbase/garm/runner/providers/common"
- "github.com/cloudbase/garm/runner/providers/external/execution"
- garmExec "github.com/cloudbase/garm/util/exec"
-
- "github.com/pkg/errors"
+ v010 "github.com/cloudbase/garm/runner/providers/v0.1.0"
+ v011 "github.com/cloudbase/garm/runner/providers/v0.1.1"
)
-var _ common.Provider = (*external)(nil)
-
+// NewProvider selects the provider based on the interface version
func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
- if cfg.ProviderType != params.ExternalProvider {
- return nil, garmErrors.NewBadRequestError("invalid provider config")
- }
-
- execPath, err := cfg.External.ExecutablePath()
- if err != nil {
- return nil, errors.Wrap(err, "fetching executable path")
- }
- return &external{
- ctx: ctx,
- controllerID: controllerID,
- cfg: cfg,
- execPath: execPath,
- }, nil
-}
-
-type external struct {
- ctx context.Context
- controllerID string
- cfg *config.Provider
- execPath string
-}
-
-func (e *external) validateResult(inst params.Instance) error {
- if inst.ProviderID == "" {
- return garmErrors.NewProviderError("missing provider ID")
- }
-
- if inst.Name == "" {
- return garmErrors.NewProviderError("missing instance name")
- }
-
- if inst.OSName == "" || inst.OSArch == "" || inst.OSType == "" {
- // we can still function without this info (I think)
- log.Printf("WARNING: missing OS information")
- }
- if !providerCommon.IsValidProviderStatus(inst.Status) {
- return garmErrors.NewProviderError("invalid status returned (%s)", inst.Status)
- }
-
- return nil
-}
-
-// CreateInstance creates a new compute instance in the provider.
-func (e *external) CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.Instance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.CreateInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- asJs, err := json.Marshal(bootstrapParams)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "serializing bootstrap params")
- }
-
- out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
- if err != nil {
- return params.Instance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param params.Instance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return params.Instance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- if err := e.validateResult(param); err != nil {
- return params.Instance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
-
- retAsJs, _ := json.MarshalIndent(param, "", " ")
- log.Printf("provider returned: %s", string(retAsJs))
- return param, nil
-}
-
-// Delete instance will delete the instance in a provider.
-func (e *external) DeleteInstance(ctx context.Context, instance string) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.DeleteInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- var exitErr *exec.ExitError
- if !errors.As(err, &exitErr) || exitErr.ExitCode() != execution.ExitCodeNotFound {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- }
- return nil
-}
-
-// GetInstance will return details about one instance.
-func (e *external) GetInstance(ctx context.Context, instance string) (params.Instance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.GetInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- // TODO(gabriel-samfira): handle error types. Of particular insterest is to
- // know when the error is ErrNotFound.
- out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return params.Instance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param params.Instance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return params.Instance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- if err := e.validateResult(param); err != nil {
- return params.Instance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
-
- return param, nil
-}
-
-// ListInstances will list all instances for a provider.
-func (e *external) ListInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.ListInstancesCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_POOL_ID=%s", poolID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
-
- out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return []params.Instance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
-
- var param []params.Instance
- if err := json.Unmarshal(out, ¶m); err != nil {
- return []params.Instance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
- }
-
- for _, inst := range param {
- if err := e.validateResult(inst); err != nil {
- return []params.Instance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
- }
- }
- return param, nil
-}
-
-// RemoveAllInstances will remove all instances created by this provider.
-func (e *external) RemoveAllInstances(ctx context.Context) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.RemoveAllInstancesCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-// Stop shuts down the instance.
-func (e *external) Stop(ctx context.Context, instance string, force bool) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.StopInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-// Start boots up an instance.
-func (e *external) Start(ctx context.Context, instance string) error {
- asEnv := []string{
- fmt.Sprintf("GARM_COMMAND=%s", execution.StartInstanceCommand),
- fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
- fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
- fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
- }
- _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
- if err != nil {
- return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
- }
- return nil
-}
-
-func (e *external) AsParams() params.Provider {
- return params.Provider{
- Name: e.cfg.Name,
- Description: e.cfg.Description,
- ProviderType: e.cfg.ProviderType,
+ switch cfg.External.InterfaceVersion {
+ case common.Version010, "":
+ return v010.NewProvider(ctx, cfg, controllerID)
+ case common.Version011:
+ return v011.NewProvider(ctx, cfg, controllerID)
+ default:
+ return nil, fmt.Errorf("unsupported interface version: %s", cfg.External.InterfaceVersion)
}
}
diff --git a/runner/providers/lxd/images.go b/runner/providers/lxd/images.go
deleted file mode 100644
index 8e407c56..00000000
--- a/runner/providers/lxd/images.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "fmt"
- "strings"
-
- "github.com/cloudbase/garm/config"
- runnerErrors "github.com/cloudbase/garm/errors"
-
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-)
-
-type image struct {
- remotes map[string]config.LXDImageRemote
-}
-
-// parseImageName parses the image name that comes in from the config and returns a
-// remote. If no remote is configured with the given name, an error is returned.
-func (i *image) parseImageName(imageName string) (config.LXDImageRemote, string, error) {
- if !strings.Contains(imageName, ":") {
- return config.LXDImageRemote{}, "", fmt.Errorf("image does not include a remote")
- }
-
- details := strings.SplitN(imageName, ":", 2)
- for remoteName, val := range i.remotes {
- if remoteName == details[0] {
- return val, details[1], nil
- }
- }
- return config.LXDImageRemote{}, "", runnerErrors.ErrNotFound
-}
-
-func (i *image) getLocalImageByAlias(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (*api.Image, error) {
- aliases, err := cli.GetImageAliasArchitectures(imageType.String(), imageName)
- if err != nil {
- return nil, errors.Wrapf(err, "resolving alias: %s", imageName)
- }
-
- alias, ok := aliases[arch]
- if !ok {
- return nil, fmt.Errorf("no image found for arch %s and image type %s with name %s", arch, imageType, imageName)
- }
-
- image, _, err := cli.GetImage(alias.Target)
- if err != nil {
- return nil, errors.Wrap(err, "fetching image details")
- }
- return image, nil
-}
-
-func (i *image) getInstanceSource(imageName string, imageType config.LXDImageType, arch string, cli lxd.InstanceServer) (api.InstanceSource, error) {
- instanceSource := api.InstanceSource{
- Type: "image",
- }
- if !strings.Contains(imageName, ":") {
- // A remote was not specified, try to find an image using the imageName as
- // an alias.
- imageDetails, err := i.getLocalImageByAlias(imageName, imageType, arch, cli)
- if err != nil {
- return api.InstanceSource{}, errors.Wrap(err, "fetching image")
- }
- instanceSource.Fingerprint = imageDetails.Fingerprint
- } else {
- remote, parsedName, err := i.parseImageName(imageName)
- if err != nil {
- return api.InstanceSource{}, errors.Wrap(err, "parsing image name")
- }
- instanceSource.Alias = parsedName
- instanceSource.Server = remote.Address
- instanceSource.Protocol = string(remote.Protocol)
- }
- return instanceSource, nil
-}
diff --git a/runner/providers/lxd/lxd.go b/runner/providers/lxd/lxd.go
deleted file mode 100644
index 270073a2..00000000
--- a/runner/providers/lxd/lxd.go
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "context"
- "fmt"
- "log"
- "sync"
- "time"
-
- "github.com/cloudbase/garm/config"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/google/go-github/v48/github"
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-)
-
-var _ common.Provider = &LXD{}
-
-const (
- // We look for this key in the config of the instances to determine if they are
- // created by us or not.
- controllerIDKeyName = "user.runner-controller-id"
- poolIDKey = "user.runner-pool-id"
-
- // osTypeKeyName is the key we use in the instance config to indicate the OS
- // platform a runner is supposed to have. This value is defined in the pool and
- // passed into the provider as bootstrap params.
- osTypeKeyName = "user.os-type"
-
- // osArchKeyNAme is the key we use in the instance config to indicate the OS
- // architecture a runner is supposed to have. This value is defined in the pool and
- // passed into the provider as bootstrap params.
- osArchKeyNAme = "user.os-arch"
-)
-
-var (
- // lxdToGithubArchMap translates LXD architectures to Github tools architectures.
- // TODO: move this in a separate package. This will most likely be used
- // by any other provider.
- lxdToGithubArchMap map[string]string = map[string]string{
- "x86_64": "x64",
- "amd64": "x64",
- "armv7l": "arm",
- "aarch64": "arm64",
- "x64": "x64",
- "arm": "arm",
- "arm64": "arm64",
- }
-
- configToLXDArchMap map[params.OSArch]string = map[params.OSArch]string{
- params.Amd64: "x86_64",
- params.Arm64: "aarch64",
- params.Arm: "armv7l",
- }
-
- lxdToConfigArch map[string]params.OSArch = map[string]params.OSArch{
- "x86_64": params.Amd64,
- "aarch64": params.Arm64,
- "armv7l": params.Arm,
- }
-)
-
-const (
- DefaultProjectDescription = "This project was created automatically by garm to be used for github ephemeral action runners."
- DefaultProjectName = "garm-project"
-)
-
-func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
- if err := cfg.Validate(); err != nil {
- return nil, errors.Wrap(err, "validating provider config")
- }
-
- if cfg.ProviderType != params.LXDProvider {
- return nil, fmt.Errorf("invalid provider type %s, expected %s", cfg.ProviderType, params.LXDProvider)
- }
-
- provider := &LXD{
- ctx: ctx,
- cfg: cfg,
- controllerID: controllerID,
- imageManager: &image{
- remotes: cfg.LXD.ImageRemotes,
- },
- }
-
- return provider, nil
-}
-
-type LXD struct {
- // cfg is the provider config for this provider.
- cfg *config.Provider
- // ctx is the context.
- ctx context.Context
- // cli is the LXD client.
- cli lxd.InstanceServer
- // imageManager downloads images from remotes
- imageManager *image
- // controllerID is the ID of this controller
- controllerID string
-
- mux sync.Mutex
-}
-
-func (l *LXD) getCLI() (lxd.InstanceServer, error) {
- l.mux.Lock()
- defer l.mux.Unlock()
-
- if l.cli != nil {
- return l.cli, nil
- }
- cli, err := getClientFromConfig(l.ctx, &l.cfg.LXD)
- if err != nil {
- return nil, errors.Wrap(err, "creating LXD client")
- }
-
- _, _, err = cli.GetProject(projectName(l.cfg.LXD))
- if err != nil {
- return nil, errors.Wrapf(err, "fetching project name: %s", projectName(l.cfg.LXD))
- }
- cli = cli.UseProject(projectName(l.cfg.LXD))
- l.cli = cli
-
- return cli, nil
-}
-
-func (l *LXD) getProfiles(flavor string) ([]string, error) {
- ret := []string{}
- if l.cfg.LXD.IncludeDefaultProfile {
- ret = append(ret, "default")
- }
-
- set := map[string]struct{}{}
-
- cli, err := l.getCLI()
- if err != nil {
- return nil, errors.Wrap(err, "fetching client")
- }
-
- profiles, err := cli.GetProfileNames()
- if err != nil {
- return nil, errors.Wrap(err, "fetching profile names")
- }
- for _, profile := range profiles {
- set[profile] = struct{}{}
- }
-
- if _, ok := set[flavor]; !ok {
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "looking for profile %s", flavor)
- }
-
- ret = append(ret, flavor)
- return ret, nil
-}
-
-func (l *LXD) getTools(tools []*github.RunnerApplicationDownload, osType params.OSType, architecture string) (github.RunnerApplicationDownload, error) {
- // Validate image OS. Linux only for now.
- switch osType {
- case params.Linux:
- default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("this provider does not support OS type: %s", osType)
- }
-
- // Find tools for OS/Arch.
- for _, tool := range tools {
- if tool == nil {
- continue
- }
- if tool.OS == nil || tool.Architecture == nil {
- continue
- }
-
- // fmt.Println(*tool.Architecture, *tool.OS)
- // fmt.Printf("image arch: %s --> osType: %s\n", image.Architecture, string(osType))
- if *tool.Architecture == architecture && *tool.OS == string(osType) {
- return *tool, nil
- }
-
- arch, ok := lxdToGithubArchMap[architecture]
- if ok && arch == *tool.Architecture && *tool.OS == string(osType) {
- return *tool, nil
- }
- }
- return github.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, architecture)
-}
-
-// sadly, the security.secureboot flag is a string encoded boolean.
-func (l *LXD) secureBootEnabled() string {
- if l.cfg.LXD.SecureBoot {
- return "true"
- }
- return "false"
-}
-
-func (l *LXD) getCreateInstanceArgs(bootstrapParams params.BootstrapInstance, specs extraSpecs) (api.InstancesPost, error) {
- if bootstrapParams.Name == "" {
- return api.InstancesPost{}, runnerErrors.NewBadRequestError("missing name")
- }
- profiles, err := l.getProfiles(bootstrapParams.Flavor)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "fetching profiles")
- }
-
- arch, err := resolveArchitecture(bootstrapParams.OSArch)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "fetching archictecture")
- }
-
- instanceType := l.cfg.LXD.GetInstanceType()
- instanceSource, err := l.imageManager.getInstanceSource(bootstrapParams.Image, instanceType, arch, l.cli)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "getting instance source")
- }
-
- tools, err := l.getTools(bootstrapParams.Tools, bootstrapParams.OSType, arch)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "getting tools")
- }
-
- bootstrapParams.UserDataOptions.DisableUpdatesOnBoot = specs.DisableUpdates
- bootstrapParams.UserDataOptions.ExtraPackages = specs.ExtraPackages
- cloudCfg, err := util.GetCloudConfig(bootstrapParams, tools, bootstrapParams.Name)
- if err != nil {
- return api.InstancesPost{}, errors.Wrap(err, "generating cloud-config")
- }
-
- configMap := map[string]string{
- "user.user-data": cloudCfg,
- osTypeKeyName: string(bootstrapParams.OSType),
- osArchKeyNAme: string(bootstrapParams.OSArch),
- controllerIDKeyName: l.controllerID,
- poolIDKey: bootstrapParams.PoolID,
- }
-
- if instanceType == config.LXDImageVirtualMachine {
- configMap["security.secureboot"] = l.secureBootEnabled()
- }
-
- args := api.InstancesPost{
- InstancePut: api.InstancePut{
- Architecture: arch,
- Profiles: profiles,
- Description: "Github runner provisioned by garm",
- Config: configMap,
- },
- Source: instanceSource,
- Name: bootstrapParams.Name,
- Type: api.InstanceType(instanceType),
- }
- return args, nil
-}
-
-func (l *LXD) AsParams() params.Provider {
- return params.Provider{
- Name: l.cfg.Name,
- ProviderType: l.cfg.ProviderType,
- Description: l.cfg.Description,
- }
-}
-
-func (l *LXD) launchInstance(createArgs api.InstancesPost) error {
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
- // Get LXD to create the instance (background operation)
- op, err := cli.CreateInstance(createArgs)
- if err != nil {
- return errors.Wrap(err, "creating instance")
- }
-
- // Wait for the operation to complete
- err = op.Wait()
- if err != nil {
- return errors.Wrap(err, "waiting for instance creation")
- }
-
- // Get LXD to start the instance (background operation)
- reqState := api.InstanceStatePut{
- Action: "start",
- Timeout: -1,
- }
-
- op, err = cli.UpdateInstanceState(createArgs.Name, reqState, "")
- if err != nil {
- return errors.Wrap(err, "starting instance")
- }
-
- // Wait for the operation to complete
- err = op.Wait()
- if err != nil {
- return errors.Wrap(err, "waiting for instance to start")
- }
- return nil
-}
-
-// CreateInstance creates a new compute instance in the provider.
-func (l *LXD) CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.Instance, error) {
- extraSpecs, err := parseExtraSpecsFromBootstrapParams(bootstrapParams)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "parsing extra specs")
- }
- args, err := l.getCreateInstanceArgs(bootstrapParams, extraSpecs)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching create args")
- }
-
- if err := l.launchInstance(args); err != nil {
- return params.Instance{}, errors.Wrap(err, "creating instance")
- }
-
- ret, err := l.waitInstanceHasIP(ctx, args.Name)
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
- }
-
- return ret, nil
-}
-
-// GetInstance will return details about one instance.
-func (l *LXD) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) {
- cli, err := l.getCLI()
- if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching client")
- }
- instance, _, err := cli.GetInstanceFull(instanceName)
- if err != nil {
- if isNotFoundError(err) {
- return params.Instance{}, errors.Wrapf(runnerErrors.ErrNotFound, "fetching instance: %q", err)
- }
- return params.Instance{}, errors.Wrap(err, "fetching instance")
- }
-
- return lxdInstanceToAPIInstance(instance), nil
-}
-
-// Delete instance will delete the instance in a provider.
-func (l *LXD) DeleteInstance(ctx context.Context, instance string) error {
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
-
- if err := l.setState(instance, "stop", true); err != nil {
- if isNotFoundError(err) {
- log.Printf("received not found error when stopping instance %s", instance)
- return nil
- }
- // I am not proud of this, but the drivers.ErrInstanceIsStopped from LXD pulls in
- // a ton of CGO, linux specific dependencies, that don't make sense having
- // in garm.
- if !(errors.Cause(err).Error() == errInstanceIsStopped.Error()) {
- return errors.Wrap(err, "stopping instance")
- }
- }
-
- opResponse := make(chan struct {
- op lxd.Operation
- err error
- })
- var op lxd.Operation
- go func() {
- op, err := cli.DeleteInstance(instance)
- opResponse <- struct {
- op lxd.Operation
- err error
- }{op: op, err: err}
- }()
-
- select {
- case resp := <-opResponse:
- if resp.err != nil {
- if isNotFoundError(resp.err) {
- log.Printf("received not found error when deleting instance %s", instance)
- return nil
- }
- return errors.Wrap(resp.err, "removing instance")
- }
- op = resp.op
- case <-time.After(time.Second * 60):
- return errors.Wrapf(runnerErrors.ErrTimeout, "removing instance %s", instance)
- }
-
- opTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60)
- defer cancel()
- err = op.WaitContext(opTimeout)
- if err != nil {
- if isNotFoundError(err) {
- log.Printf("received not found error when waiting for instance deletion %s", instance)
- return nil
- }
- return errors.Wrap(err, "waiting for instance deletion")
- }
- return nil
-}
-
-type listResponse struct {
- instances []api.InstanceFull
- err error
-}
-
-// ListInstances will list all instances for a provider.
-func (l *LXD) ListInstances(ctx context.Context, poolID string) ([]params.Instance, error) {
- cli, err := l.getCLI()
- if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching client")
- }
-
- result := make(chan listResponse, 1)
-
- go func() {
- // TODO(gabriel-samfira): if this blocks indefinitely, we will leak a goroutine.
- // Convert the internal provider to an external one. Running the provider as an
- // external process will allow us to not care if a goroutine leaks. Once a timeout
- // is reached, the provider can just exit with an error. Something we can't do with
- // internal providers.
- instances, err := cli.GetInstancesFull(api.InstanceTypeAny)
- result <- listResponse{
- instances: instances,
- err: err,
- }
- }()
-
- var instances []api.InstanceFull
- select {
- case res := <-result:
- if res.err != nil {
- return []params.Instance{}, errors.Wrap(res.err, "fetching instances")
- }
- instances = res.instances
- case <-time.After(time.Second * 60):
- return []params.Instance{}, errors.Wrap(runnerErrors.ErrTimeout, "fetching instances from provider")
- }
-
- ret := []params.Instance{}
-
- for _, instance := range instances {
- if id, ok := instance.ExpandedConfig[controllerIDKeyName]; ok && id == l.controllerID {
- if poolID != "" {
- id := instance.ExpandedConfig[poolIDKey]
- if id != poolID {
- // Pool ID was specified. Filter out instances belonging to other pools.
- continue
- }
- }
- ret = append(ret, lxdInstanceToAPIInstance(&instance))
- }
- }
-
- return ret, nil
-}
-
-// RemoveAllInstances will remove all instances created by this provider.
-func (l *LXD) RemoveAllInstances(ctx context.Context) error {
- instances, err := l.ListInstances(ctx, "")
- if err != nil {
- return errors.Wrap(err, "fetching instance list")
- }
-
- for _, instance := range instances {
- // TODO: remove in parallel
- if err := l.DeleteInstance(ctx, instance.Name); err != nil {
- return errors.Wrapf(err, "removing instance %s", instance.Name)
- }
- }
-
- return nil
-}
-
-func (l *LXD) setState(instance, state string, force bool) error {
- reqState := api.InstanceStatePut{
- Action: state,
- Timeout: -1,
- Force: force,
- }
-
- cli, err := l.getCLI()
- if err != nil {
- return errors.Wrap(err, "fetching client")
- }
-
- op, err := cli.UpdateInstanceState(instance, reqState, "")
- if err != nil {
- return errors.Wrapf(err, "setting state to %s", state)
- }
- ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*60)
- defer cancel()
- err = op.WaitContext(ctxTimeout)
- if err != nil {
- return errors.Wrapf(err, "waiting for instance to transition to state %s", state)
- }
- return nil
-}
-
-// Stop shuts down the instance.
-func (l *LXD) Stop(ctx context.Context, instance string, force bool) error {
- return l.setState(instance, "stop", force)
-}
-
-// Start boots up an instance.
-func (l *LXD) Start(ctx context.Context, instance string) error {
- return l.setState(instance, "start", false)
-}
diff --git a/runner/providers/lxd/specs.go b/runner/providers/lxd/specs.go
deleted file mode 100644
index 202473b7..00000000
--- a/runner/providers/lxd/specs.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2023 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "encoding/json"
-
- "github.com/cloudbase/garm/params"
- "github.com/pkg/errors"
-)
-
-type extraSpecs struct {
- DisableUpdates bool `json:"disable_updates"`
- ExtraPackages []string `json:"extra_packages"`
-}
-
-func parseExtraSpecsFromBootstrapParams(bootstrapParams params.BootstrapInstance) (extraSpecs, error) {
- specs := extraSpecs{}
- if bootstrapParams.ExtraSpecs == nil {
- return specs, nil
- }
-
- if err := json.Unmarshal(bootstrapParams.ExtraSpecs, &specs); err != nil {
- return specs, errors.Wrap(err, "unmarshaling extra specs")
- }
- return specs, nil
-}
diff --git a/runner/providers/lxd/util.go b/runner/providers/lxd/util.go
deleted file mode 100644
index f029037d..00000000
--- a/runner/providers/lxd/util.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2022 Cloudbase Solutions SRL
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package lxd
-
-import (
- "context"
- "database/sql"
- "fmt"
- "log"
- "net"
- "net/http"
- "os"
- "strings"
- "time"
-
- "github.com/cloudbase/garm/config"
- "github.com/cloudbase/garm/params"
- "github.com/cloudbase/garm/runner/providers/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/juju/clock"
- "github.com/juju/retry"
- lxd "github.com/lxc/lxd/client"
- "github.com/lxc/lxd/shared/api"
- "github.com/pkg/errors"
-)
-
-var (
- //lint:ignore ST1005 imported error from lxd
- errInstanceIsStopped error = fmt.Errorf("The instance is already stopped")
-)
-
-var httpResponseErrors = map[int][]error{
- http.StatusNotFound: {os.ErrNotExist, sql.ErrNoRows},
-}
-
-// isNotFoundError returns true if the error is considered a Not Found error.
-func isNotFoundError(err error) bool {
- if api.StatusErrorCheck(err, http.StatusNotFound) {
- return true
- }
-
- for _, checkErr := range httpResponseErrors[http.StatusNotFound] {
- if errors.Is(err, checkErr) {
- return true
- }
- }
-
- return false
-}
-
-func lxdInstanceToAPIInstance(instance *api.InstanceFull) params.Instance {
- lxdOS, ok := instance.ExpandedConfig["image.os"]
- if !ok {
- log.Printf("failed to find OS in instance config")
- }
-
- osType, err := util.OSToOSType(lxdOS)
- if err != nil {
- log.Printf("failed to find OS type for OS %s", lxdOS)
- }
-
- if osType == "" {
- osTypeFromTag, ok := instance.ExpandedConfig[osTypeKeyName]
- if !ok {
- log.Printf("failed to find OS type in fallback location")
- }
- osType = params.OSType(osTypeFromTag)
- }
-
- osRelease, ok := instance.ExpandedConfig["image.release"]
- if !ok {
- log.Printf("failed to find OS release instance config")
- }
-
- state := instance.State
- addresses := []params.Address{}
- if state.Network != nil {
- for _, details := range state.Network {
- for _, addr := range details.Addresses {
- if addr.Scope != "global" {
- continue
- }
- addresses = append(addresses, params.Address{
- Address: addr.Address,
- Type: params.PublicAddress,
- })
- }
- }
- }
-
- instanceArch, ok := lxdToConfigArch[instance.Architecture]
- if !ok {
- log.Printf("failed to find OS architecture")
- }
-
- return params.Instance{
- OSArch: instanceArch,
- ProviderID: instance.Name,
- Name: instance.Name,
- OSType: osType,
- OSName: strings.ToLower(lxdOS),
- OSVersion: osRelease,
- Addresses: addresses,
- Status: lxdStatusToProviderStatus(state.Status),
- }
-}
-
-func lxdStatusToProviderStatus(status string) common.InstanceStatus {
- switch status {
- case "Running":
- return common.InstanceRunning
- case "Stopped":
- return common.InstanceStopped
- default:
- return common.InstanceStatusUnknown
- }
-}
-
-func getClientFromConfig(ctx context.Context, cfg *config.LXD) (cli lxd.InstanceServer, err error) {
- if cfg.UnixSocket != "" {
- return lxd.ConnectLXDUnixWithContext(ctx, cfg.UnixSocket, nil)
- }
-
- var srvCrtContents, tlsCAContents, clientCertContents, clientKeyContents []byte
-
- if cfg.TLSServerCert != "" {
- srvCrtContents, err = os.ReadFile(cfg.TLSServerCert)
- if err != nil {
- return nil, errors.Wrap(err, "reading TLSServerCert")
- }
- }
-
- if cfg.TLSCA != "" {
- tlsCAContents, err = os.ReadFile(cfg.TLSCA)
- if err != nil {
- return nil, errors.Wrap(err, "reading TLSCA")
- }
- }
-
- if cfg.ClientCertificate != "" {
- clientCertContents, err = os.ReadFile(cfg.ClientCertificate)
- if err != nil {
- return nil, errors.Wrap(err, "reading ClientCertificate")
- }
- }
-
- if cfg.ClientKey != "" {
- clientKeyContents, err = os.ReadFile(cfg.ClientKey)
- if err != nil {
- return nil, errors.Wrap(err, "reading ClientKey")
- }
- }
-
- connectArgs := lxd.ConnectionArgs{
- TLSServerCert: string(srvCrtContents),
- TLSCA: string(tlsCAContents),
- TLSClientCert: string(clientCertContents),
- TLSClientKey: string(clientKeyContents),
- }
-
- lxdCLI, err := lxd.ConnectLXD(cfg.URL, &connectArgs)
- if err != nil {
- return nil, errors.Wrap(err, "connecting to LXD")
- }
-
- return lxdCLI, nil
-}
-
-func projectName(cfg config.LXD) string {
- if cfg.ProjectName != "" {
- return cfg.ProjectName
- }
- return DefaultProjectName
-}
-
-func resolveArchitecture(osArch params.OSArch) (string, error) {
- if string(osArch) == "" {
- return configToLXDArchMap[params.Amd64], nil
- }
- arch, ok := configToLXDArchMap[osArch]
- if !ok {
- return "", fmt.Errorf("architecture %s is not supported", osArch)
- }
- return arch, nil
-}
-
-// waitDeviceActive is a function capable of figuring out when a Equinix Metal
-// device is active
-func (l *LXD) waitInstanceHasIP(ctx context.Context, instanceName string) (params.Instance, error) {
- var p params.Instance
- var errIPNotFound error = fmt.Errorf("ip not found")
- err := retry.Call(retry.CallArgs{
- Func: func() error {
- var err error
- p, err = l.GetInstance(ctx, instanceName)
- if err != nil {
- return errors.Wrap(err, "fetching instance")
- }
- for _, addr := range p.Addresses {
- ip := net.ParseIP(addr.Address)
- if ip == nil {
- continue
- }
- if ip.To4() == nil {
- continue
- }
- return nil
- }
- return errIPNotFound
- },
- Attempts: 20,
- Delay: 5 * time.Second,
- Clock: clock.WallClock,
- })
-
- if err != nil && err != errIPNotFound {
- return params.Instance{}, err
- }
-
- return p, nil
-}
diff --git a/runner/providers/providers.go b/runner/providers/providers.go
index 2e1f0d1b..ada11729 100644
--- a/runner/providers/providers.go
+++ b/runner/providers/providers.go
@@ -16,15 +16,13 @@ package providers
import (
"context"
- "log"
+ "fmt"
+ "log/slog"
"github.com/cloudbase/garm/config"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/runner/providers/external"
- "github.com/cloudbase/garm/runner/providers/lxd"
-
- "github.com/pkg/errors"
)
// LoadProvidersFromConfig loads all providers from the config and populates
@@ -32,22 +30,19 @@ import (
func LoadProvidersFromConfig(ctx context.Context, cfg config.Config, controllerID string) (map[string]common.Provider, error) {
providers := make(map[string]common.Provider, len(cfg.Providers))
for _, providerCfg := range cfg.Providers {
- log.Printf("Loading provider %s", providerCfg.Name)
+ slog.InfoContext(
+ ctx, "Loading provider",
+ "provider", providerCfg.Name)
switch providerCfg.ProviderType {
- case params.LXDProvider:
- conf := providerCfg
- provider, err := lxd.NewProvider(ctx, &conf, controllerID)
- if err != nil {
- return nil, errors.Wrap(err, "creating provider")
- }
- providers[providerCfg.Name] = provider
case params.ExternalProvider:
conf := providerCfg
provider, err := external.NewProvider(ctx, &conf, controllerID)
if err != nil {
- return nil, errors.Wrap(err, "creating provider")
+ return nil, fmt.Errorf("error creating provider: %w", err)
}
providers[providerCfg.Name] = provider
+ default:
+ return nil, fmt.Errorf("unknown provider type %s", providerCfg.ProviderType)
}
}
return providers, nil
diff --git a/runner/providers/util/util.go b/runner/providers/util/util.go
new file mode 100644
index 00000000..fb3c12bd
--- /dev/null
+++ b/runner/providers/util/util.go
@@ -0,0 +1,35 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+)
+
+// IsValidProviderStatus checks if the given status is valid for the provider.
+// A provider should only return a status indicating that the instance is in a
+// lifecycle state that it can influence. The sole purpose of a provider is to
+// manage the lifecycle of an instance. Statuses that indicate an instance should
+// be created or removed, will be set by the controller.
+func IsValidProviderStatus(status commonParams.InstanceStatus) bool {
+ switch status {
+ case commonParams.InstanceRunning, commonParams.InstanceError,
+ commonParams.InstanceStopped, commonParams.InstanceStatusUnknown:
+
+ return true
+ default:
+ return false
+ }
+}
diff --git a/runner/providers/v0.1.0/external.go b/runner/providers/v0.1.0/external.go
new file mode 100644
index 00000000..bb96f4d7
--- /dev/null
+++ b/runner/providers/v0.1.0/external.go
@@ -0,0 +1,339 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package v010
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os/exec"
+
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmExec "github.com/cloudbase/garm-provider-common/util/exec"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+ commonExternal "github.com/cloudbase/garm/runner/providers/common"
+)
+
+var _ common.Provider = (*external)(nil)
+
+// NewProvider creates a legacy external provider.
+func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
+ if cfg.ProviderType != params.ExternalProvider {
+ return nil, garmErrors.NewBadRequestError("invalid provider config")
+ }
+
+ execPath, err := cfg.External.ExecutablePath()
+ if err != nil {
+ return nil, fmt.Errorf("error fetching executable path: %w", err)
+ }
+
+ // Set GARM_INTERFACE_VERSION to the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between the external
+ // provider and garm
+
+ envVars := cfg.External.GetEnvironmentVariables()
+ envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", common.Version010))
+
+ return &external{
+ ctx: ctx,
+ controllerID: controllerID,
+ cfg: cfg,
+ execPath: execPath,
+ environmentVariables: envVars,
+ }, nil
+}
+
+type external struct {
+ ctx context.Context
+ controllerID string
+ cfg *config.Provider
+ execPath string
+ environmentVariables []string
+}
+
+// CreateInstance creates a new compute instance in the provider.
+func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ asJs, err := json.Marshal(bootstrapParams)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing bootstrap params: %w", err)
+ }
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ retAsJs, _ := json.MarshalIndent(param, "", " ")
+ slog.DebugContext(
+ ctx, "provider returned",
+ "output", string(retAsJs))
+ return param, nil
+}
+
+// Delete instance will delete the instance in a provider.
+func (e *external) DeleteInstance(ctx context.Context, instance string, _ common.DeleteInstanceParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ var exitErr *exec.ExitError
+ if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ }
+ return nil
+}
+
+// GetInstance will return details about one instance.
+func (e *external) GetInstance(ctx context.Context, instance string, _ common.GetInstanceParams) (commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): handle error types. Of particular interest is to
+ // know when the error is ErrNotFound.
+ metrics.InstanceOperationCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ return param, nil
+}
+
+// ListInstances will list all instances for a provider.
+func (e *external) ListInstances(ctx context.Context, poolID string, _ common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", poolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param []commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ ret := make([]commonParams.ProviderInstance, len(param))
+ for idx, inst := range param {
+ if err := commonExternal.ValidateResult(inst); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+ ret[idx] = inst
+ }
+ return ret, nil
+}
+
+// RemoveAllInstances will remove all instances created by this provider.
+func (e *external) RemoveAllInstances(ctx context.Context, _ common.RemoveAllInstancesParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Stop shuts down the instance.
+func (e *external) Stop(ctx context.Context, instance string, _ common.StopParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Start boots up an instance.
+func (e *external) Start(ctx context.Context, instance string, _ common.StartParams) error {
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+func (e *external) AsParams() params.Provider {
+ return params.Provider{
+ Name: e.cfg.Name,
+ Description: e.cfg.Description,
+ ProviderType: e.cfg.ProviderType,
+ }
+}
+
+// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+// forces runner registration tokens to be used. This may happen if a provider has not yet
+// been updated to support JIT configuration.
+func (e *external) DisableJITConfig() bool {
+ if e.cfg == nil {
+ return false
+ }
+ return e.cfg.DisableJITConfig
+}
diff --git a/runner/providers/v0.1.1/external.go b/runner/providers/v0.1.1/external.go
new file mode 100644
index 00000000..6e43dce7
--- /dev/null
+++ b/runner/providers/v0.1.1/external.go
@@ -0,0 +1,399 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package v011
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "os/exec"
+
+ garmErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonExecution "github.com/cloudbase/garm-provider-common/execution/common"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ garmExec "github.com/cloudbase/garm-provider-common/util/exec"
+ "github.com/cloudbase/garm/config"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+ commonExternal "github.com/cloudbase/garm/runner/providers/common"
+)
+
+var _ common.Provider = (*external)(nil)
+
+func NewProvider(ctx context.Context, cfg *config.Provider, controllerID string) (common.Provider, error) {
+ if cfg.ProviderType != params.ExternalProvider {
+ return nil, garmErrors.NewBadRequestError("invalid provider config")
+ }
+
+ execPath, err := cfg.External.ExecutablePath()
+ if err != nil {
+ return nil, fmt.Errorf("error fetching executable path: %w", err)
+ }
+
+ // Set GARM_INTERFACE_VERSION to the version of the interface that the external
+ // provider implements. This is used to ensure compatibility between the external
+ // provider and garm
+ envVars := cfg.External.GetEnvironmentVariables()
+ envVars = append(envVars, fmt.Sprintf("GARM_INTERFACE_VERSION=%s", cfg.External.InterfaceVersion))
+
+ return &external{
+ ctx: ctx,
+ controllerID: controllerID,
+ cfg: cfg,
+ execPath: execPath,
+ environmentVariables: envVars,
+ }, nil
+}
+
+type external struct {
+ ctx context.Context
+ cfg *config.Provider
+ controllerID string
+ execPath string
+ environmentVariables []string
+}
+
+// CreateInstance creates a new compute instance in the provider.
+func (e *external) CreateInstance(ctx context.Context, bootstrapParams commonParams.BootstrapInstance, _ common.CreateInstanceParams) (commonParams.ProviderInstance, error) {
+ extraspecs := bootstrapParams.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.CreateInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", bootstrapParams.PoolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ asJs, err := json.Marshal(bootstrapParams)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing bootstrap params: %w", err)
+ }
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, asJs, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "CreateInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ retAsJs, _ := json.MarshalIndent(param, "", " ")
+ slog.DebugContext(
+ ctx, "provider returned",
+ "output", string(retAsJs))
+ return param, nil
+}
+
+// Delete instance will delete the instance in a provider.
+func (e *external) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
+ extraspecs := deleteInstanceParams.DeleteInstanceV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.DeleteInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", deleteInstanceParams.DeleteInstanceV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ var exitErr *exec.ExitError
+ if !errors.As(err, &exitErr) || exitErr.ExitCode() != commonExecution.ExitCodeNotFound {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "DeleteInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ }
+ return nil
+}
+
+// GetInstance will return details about one instance.
+func (e *external) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (commonParams.ProviderInstance, error) {
+ extraspecs := getInstanceParams.GetInstanceV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.GetInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", getInstanceParams.GetInstanceV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): handle error types. Of particular interest is to
+ // know when the error is ErrNotFound.
+ metrics.InstanceOperationCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ if err := commonExternal.ValidateResult(param); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "GetInstance", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+
+ return param, nil
+}
+
+// ListInstances will list all instances for a provider.
+func (e *external) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]commonParams.ProviderInstance, error) {
+ extraspecs := listInstancesParams.ListInstancesV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return []commonParams.ProviderInstance{}, fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.ListInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_POOL_ID=%s", poolID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ out, err := garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+
+ var param []commonParams.ProviderInstance
+ if err := json.Unmarshal(out, ¶m); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to decode response from binary: %s", err)
+ }
+
+ ret := make([]commonParams.ProviderInstance, len(param))
+ for idx, inst := range param {
+ if err := commonExternal.ValidateResult(inst); err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "ListInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return []commonParams.ProviderInstance{}, garmErrors.NewProviderError("failed to validate result: %s", err)
+ }
+ ret[idx] = inst
+ }
+ return ret, nil
+}
+
+// RemoveAllInstances will remove all instances created by this provider.
+func (e *external) RemoveAllInstances(ctx context.Context, removeAllInstances common.RemoveAllInstancesParams) error {
+ extraspecs := removeAllInstances.RemoveAllInstancesV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.RemoveAllInstancesCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", removeAllInstances.RemoveAllInstancesV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "RemoveAllInstances", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Stop shuts down the instance.
+func (e *external) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
+ extraspecs := stopParams.StopV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StopInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", stopParams.StopV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Stop", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+// Start boots up an instance.
+func (e *external) Start(ctx context.Context, instance string, startParams common.StartParams) error {
+ extraspecs := startParams.StartV011.PoolInfo.ExtraSpecs
+ extraspecsValue, err := json.Marshal(extraspecs)
+ if err != nil {
+ return fmt.Errorf("error serializing extraspecs: %w", err)
+ }
+ // Encode the extraspecs as base64 to avoid issues with special characters.
+ base64EncodedExtraSpecs := base64.StdEncoding.EncodeToString(extraspecsValue)
+ asEnv := []string{
+ fmt.Sprintf("GARM_COMMAND=%s", commonExecution.StartInstanceCommand),
+ fmt.Sprintf("GARM_CONTROLLER_ID=%s", e.controllerID),
+ fmt.Sprintf("GARM_INSTANCE_ID=%s", instance),
+ fmt.Sprintf("GARM_PROVIDER_CONFIG_FILE=%s", e.cfg.External.ConfigFile),
+ fmt.Sprintf("GARM_POOL_ID=%s", startParams.StartV011.PoolInfo.ID),
+ fmt.Sprintf("GARM_POOL_EXTRASPECS=%s", base64EncodedExtraSpecs),
+ }
+ asEnv = append(asEnv, e.environmentVariables...)
+
+ metrics.InstanceOperationCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+
+ _, err = garmExec.Exec(ctx, e.execPath, nil, asEnv)
+ if err != nil {
+ metrics.InstanceOperationFailedCount.WithLabelValues(
+ "Start", // label: operation
+ e.cfg.Name, // label: provider
+ ).Inc()
+ return garmErrors.NewProviderError("provider binary %s returned error: %s", e.execPath, err)
+ }
+ return nil
+}
+
+func (e *external) AsParams() params.Provider {
+ return params.Provider{
+ Name: e.cfg.Name,
+ Description: e.cfg.Description,
+ ProviderType: e.cfg.ProviderType,
+ }
+}
+
+// DisableJITConfig tells us if the provider explicitly disables JIT configuration and
+// forces runner registration tokens to be used. This may happen if a provider has not yet
+// been updated to support JIT configuration.
+func (e *external) DisableJITConfig() bool {
+ if e.cfg == nil {
+ return false
+ }
+ return e.cfg.DisableJITConfig
+}
diff --git a/runner/repositories.go b/runner/repositories.go
index 26b01ce8..0f21d882 100644
--- a/runner/repositories.go
+++ b/runner/repositories.go
@@ -16,17 +16,16 @@ package runner
import (
"context"
+ "errors"
"fmt"
- "log"
+ "log/slog"
"strings"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/auth"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/pkg/errors"
)
func (r *Runner) CreateRepository(ctx context.Context, param params.CreateRepoParams) (repo params.Repository, err error) {
@@ -35,57 +34,72 @@ func (r *Runner) CreateRepository(ctx context.Context, param params.CreateRepoPa
}
if err := param.Validate(); err != nil {
- return params.Repository{}, errors.Wrap(err, "validating params")
+ return params.Repository{}, fmt.Errorf("error validating params: %w", err)
}
- creds, ok := r.credentials[param.CredentialsName]
- if !ok {
+ var creds params.ForgeCredentials
+ switch param.ForgeType {
+ case params.GithubEndpointType:
+ creds, err = r.store.GetGithubCredentialsByName(ctx, param.CredentialsName, true)
+ case params.GiteaEndpointType:
+ creds, err = r.store.GetGiteaCredentialsByName(ctx, param.CredentialsName, true)
+ default:
+ creds, err = r.ResolveForgeCredentialByName(ctx, param.CredentialsName)
+ }
+
+ if err != nil {
return params.Repository{}, runnerErrors.NewBadRequestError("credentials %s not defined", param.CredentialsName)
}
- _, err = r.store.GetRepository(ctx, param.Owner, param.Name)
+ _, err = r.store.GetRepository(ctx, param.Owner, param.Name, creds.Endpoint.Name)
if err != nil {
if !errors.Is(err, runnerErrors.ErrNotFound) {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ return params.Repository{}, fmt.Errorf("error fetching repo: %w", err)
}
} else {
return params.Repository{}, runnerErrors.NewConflictError("repository %s/%s already exists", param.Owner, param.Name)
}
- repo, err = r.store.CreateRepository(ctx, param.Owner, param.Name, creds.Name, param.WebhookSecret)
+ repo, err = r.store.CreateRepository(ctx, param.Owner, param.Name, creds, param.WebhookSecret, param.PoolBalancerType)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repository")
+ return params.Repository{}, fmt.Errorf("error creating repository: %w", err)
}
defer func() {
if err != nil {
if deleteErr := r.store.DeleteRepository(ctx, repo.ID); deleteErr != nil {
- log.Printf("failed to delete repository: %s", deleteErr)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to delete repository",
+ "repository_id", repo.ID)
}
}
}()
+ // Use the admin context in the pool manager. Any access control is already done above when
+ // updating the store.
poolMgr, err := r.poolManagerCtrl.CreateRepoPoolManager(r.ctx, repo, r.providers, r.store)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repo pool manager")
+ return params.Repository{}, fmt.Errorf("error creating repo pool manager: %w", err)
}
if err := poolMgr.Start(); err != nil {
if deleteErr := r.poolManagerCtrl.DeleteRepoPoolManager(repo); deleteErr != nil {
- log.Printf("failed to cleanup pool manager for repo %s", repo.ID)
+ slog.With(slog.Any("error", deleteErr)).ErrorContext(
+ ctx, "failed to cleanup pool manager for repo",
+ "repository_id", repo.ID)
}
- return params.Repository{}, errors.Wrap(err, "starting repo pool manager")
+ return params.Repository{}, fmt.Errorf("error starting repo pool manager: %w", err)
}
return repo, nil
}
-func (r *Runner) ListRepositories(ctx context.Context) ([]params.Repository, error) {
+func (r *Runner) ListRepositories(ctx context.Context, filter params.RepositoryFilter) ([]params.Repository, error) {
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
- repos, err := r.store.ListRepositories(ctx)
+ repos, err := r.store.ListRepositories(ctx, filter)
if err != nil {
- return nil, errors.Wrap(err, "listing repositories")
+ return nil, fmt.Errorf("error listing repositories: %w", err)
}
var allRepos []params.Repository
@@ -111,7 +125,7 @@ func (r *Runner) GetRepositoryByID(ctx context.Context, repoID string) (params.R
repo, err := r.store.GetRepositoryByID(ctx, repoID)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repository")
+ return params.Repository{}, fmt.Errorf("error fetching repository: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
@@ -123,41 +137,70 @@ func (r *Runner) GetRepositoryByID(ctx context.Context, repoID string) (params.R
return repo, nil
}
-func (r *Runner) DeleteRepository(ctx context.Context, repoID string) error {
+func (r *Runner) DeleteRepository(ctx context.Context, repoID string, keepWebhook bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
repo, err := r.store.GetRepositoryByID(ctx, repoID)
if err != nil {
- return errors.Wrap(err, "fetching repo")
+ return fmt.Errorf("error fetching repo: %w", err)
}
- pools, err := r.store.ListRepoPools(ctx, repoID)
+ entity, err := repo.GetEntity()
if err != nil {
- return errors.Wrap(err, "fetching repo pools")
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ pools, err := r.store.ListEntityPools(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching repo pools: %w", err)
}
if len(pools) > 0 {
- poolIds := []string{}
+ poolIDs := []string{}
for _, pool := range pools {
- poolIds = append(poolIds, pool.ID)
+ poolIDs = append(poolIDs, pool.ID)
}
- return runnerErrors.NewBadRequestError("repo has pools defined (%s)", strings.Join(poolIds, ", "))
+ return runnerErrors.NewBadRequestError("repo has pools defined (%s)", strings.Join(poolIDs, ", "))
+ }
+
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error fetching repo scale sets: %w", err)
+ }
+
+ if len(scaleSets) > 0 {
+ return runnerErrors.NewBadRequestError("repo has scale sets defined; delete them first")
+ }
+
+ if !keepWebhook && r.config.Default.EnableWebhookManagement {
+ poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager: %w", err)
+ }
+
+ if err := poolMgr.UninstallWebhook(ctx); err != nil {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): Should we error out here?
+ slog.With(slog.Any("error", err)).ErrorContext(
+ ctx, "failed to uninstall webhook",
+ "pool_manager_id", poolMgr.ID())
+ }
}
if err := r.poolManagerCtrl.DeleteRepoPoolManager(repo); err != nil {
- return errors.Wrap(err, "deleting repo pool manager")
+ return fmt.Errorf("error deleting repo pool manager: %w", err)
}
if err := r.store.DeleteRepository(ctx, repoID); err != nil {
- return errors.Wrap(err, "removing repository")
+ return fmt.Errorf("error removing repository: %w", err)
}
return nil
}
-func (r *Runner) UpdateRepository(ctx context.Context, repoID string, param params.UpdateRepositoryParams) (params.Repository, error) {
+func (r *Runner) UpdateRepository(ctx context.Context, repoID string, param params.UpdateEntityParams) (params.Repository, error) {
if !auth.IsAdmin(ctx) {
return params.Repository{}, runnerErrors.ErrUnauthorized
}
@@ -165,38 +208,24 @@ func (r *Runner) UpdateRepository(ctx context.Context, repoID string, param para
r.mux.Lock()
defer r.mux.Unlock()
- repo, err := r.store.GetRepositoryByID(ctx, repoID)
- if err != nil {
- return params.Repository{}, errors.Wrap(err, "fetching repo")
+ switch param.PoolBalancerType {
+ case params.PoolBalancerTypeRoundRobin, params.PoolBalancerTypePack, params.PoolBalancerTypeNone:
+ default:
+ return params.Repository{}, runnerErrors.NewBadRequestError("invalid pool balancer type: %s", param.PoolBalancerType)
}
- if param.CredentialsName != "" {
- // Check that credentials are set before saving to db
- if _, ok := r.credentials[param.CredentialsName]; !ok {
- return params.Repository{}, runnerErrors.NewBadRequestError("invalid credentials (%s) for repo %s/%s", param.CredentialsName, repo.Owner, repo.Name)
- }
- }
-
- repo, err = r.store.UpdateRepository(ctx, repoID, param)
+ slog.InfoContext(ctx, "updating repository", "repo_id", repoID, "param", param)
+ repo, err := r.store.UpdateRepository(ctx, repoID, param)
if err != nil {
- return params.Repository{}, errors.Wrap(err, "updating repo")
+ return params.Repository{}, fmt.Errorf("error updating repo: %w", err)
}
poolMgr, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
if err != nil {
- newState := params.UpdatePoolStateParams{
- WebhookSecret: repo.WebhookSecret,
- }
- // stop the pool mgr
- if err := poolMgr.RefreshState(newState); err != nil {
- return params.Repository{}, errors.Wrap(err, "updating repo pool manager")
- }
- } else {
- if _, err := r.poolManagerCtrl.CreateRepoPoolManager(r.ctx, repo, r.providers, r.store); err != nil {
- return params.Repository{}, errors.Wrap(err, "creating repo pool manager")
- }
+ return params.Repository{}, fmt.Errorf("error getting pool manager: %w", err)
}
+ repo.PoolManagerStatus = poolMgr.Status()
return repo, nil
}
@@ -205,30 +234,23 @@ func (r *Runner) CreateRepoPool(ctx context.Context, repoID string, param params
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- r.mux.Lock()
- defer r.mux.Unlock()
-
- repo, err := r.store.GetRepositoryByID(ctx, repoID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching repo")
- }
-
- if _, err := r.poolManagerCtrl.GetRepoPoolManager(repo); err != nil {
- return params.Pool{}, runnerErrors.ErrNotFound
- }
-
createPoolParams, err := r.appendTagsToCreatePoolParams(param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool params")
+ return params.Pool{}, fmt.Errorf("error appending tags to create pool params: %w", err)
}
if createPoolParams.RunnerBootstrapTimeout == 0 {
createPoolParams.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
}
- pool, err := r.store.CreateRepositoryPool(ctx, repoID, createPoolParams)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+
+ pool, err := r.store.CreateEntityPool(ctx, entity, createPoolParams)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "creating pool")
+ return params.Pool{}, fmt.Errorf("error creating pool: %w", err)
}
return pool, nil
@@ -239,10 +261,16 @@ func (r *Runner) GetRepoPoolByID(ctx context.Context, repoID, poolID string) (pa
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
- if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
}
+
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
+ if err != nil {
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
+ }
+
return pool, nil
}
@@ -251,27 +279,27 @@ func (r *Runner) DeleteRepoPool(ctx context.Context, repoID, poolID string) erro
return runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return errors.Wrap(err, "fetching pool")
- }
-
- instances, err := r.store.ListPoolInstances(ctx, pool.ID)
- if err != nil {
- return errors.Wrap(err, "fetching instances")
+ return fmt.Errorf("error fetching pool: %w", err)
}
+ // nolint:golangci-lint,godox
// TODO: implement a count function
- if len(instances) > 0 {
+ if len(pool.Instances) > 0 {
runnerIDs := []string{}
- for _, run := range instances {
+ for _, run := range pool.Instances {
runnerIDs = append(runnerIDs, run.ID)
}
return runnerErrors.NewBadRequestError("pool has runners: %s", strings.Join(runnerIDs, ", "))
}
- if err := r.store.DeleteRepositoryPool(ctx, repoID, poolID); err != nil {
- return errors.Wrap(err, "deleting pool")
+ if err := r.store.DeleteEntityPool(ctx, entity, poolID); err != nil {
+ return fmt.Errorf("error deleting pool: %w", err)
}
return nil
}
@@ -280,10 +308,13 @@ func (r *Runner) ListRepoPools(ctx context.Context, repoID string) ([]params.Poo
if !auth.IsAdmin(ctx) {
return []params.Pool{}, runnerErrors.ErrUnauthorized
}
-
- pools, err := r.store.ListRepoPools(ctx, repoID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pools, err := r.store.ListEntityPools(ctx, entity)
if err != nil {
- return nil, errors.Wrap(err, "fetching pools")
+ return nil, fmt.Errorf("error fetching pools: %w", err)
}
return pools, nil
}
@@ -295,7 +326,7 @@ func (r *Runner) ListPoolInstances(ctx context.Context, poolID string) ([]params
instances, err := r.store.ListPoolInstances(ctx, poolID)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
@@ -305,9 +336,13 @@ func (r *Runner) UpdateRepoPool(ctx context.Context, repoID, poolID string, para
return params.Pool{}, runnerErrors.ErrUnauthorized
}
- pool, err := r.store.GetRepositoryPool(ctx, repoID, poolID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := r.store.GetEntityPool(ctx, entity, poolID)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "fetching pool")
+ return params.Pool{}, fmt.Errorf("error fetching pool: %w", err)
}
maxRunners := pool.MaxRunners
@@ -324,9 +359,9 @@ func (r *Runner) UpdateRepoPool(ctx context.Context, repoID, poolID string, para
return params.Pool{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
}
- newPool, err := r.store.UpdateRepositoryPool(ctx, repoID, poolID, param)
+ newPool, err := r.store.UpdateEntityPool(ctx, entity, poolID, param)
if err != nil {
- return params.Pool{}, errors.Wrap(err, "updating pool")
+ return params.Pool{}, fmt.Errorf("error updating pool: %w", err)
}
return newPool, nil
}
@@ -335,26 +370,94 @@ func (r *Runner) ListRepoInstances(ctx context.Context, repoID string) ([]params
if !auth.IsAdmin(ctx) {
return nil, runnerErrors.ErrUnauthorized
}
-
- instances, err := r.store.ListRepoInstances(ctx, repoID)
+ entity := params.ForgeEntity{
+ ID: repoID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ instances, err := r.store.ListEntityInstances(ctx, entity)
if err != nil {
- return []params.Instance{}, errors.Wrap(err, "fetching instances")
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
-func (r *Runner) findRepoPoolManager(owner, name string) (common.PoolManager, error) {
+func (r *Runner) findRepoPoolManager(owner, name, endpointName string) (common.PoolManager, error) {
r.mux.Lock()
defer r.mux.Unlock()
- repo, err := r.store.GetRepository(r.ctx, owner, name)
+ repo, err := r.store.GetRepository(r.ctx, owner, name, endpointName)
if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
+ return nil, fmt.Errorf("error fetching repo: %w", err)
}
poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool manager for repo")
+ return nil, fmt.Errorf("error fetching pool manager for repo: %w", err)
}
return poolManager, nil
}
+
+func (r *Runner) InstallRepoWebhook(ctx context.Context, repoID string, param params.InstallWebhookParams) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ info, err := poolManager.InstallWebhook(ctx, param)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error installing webhook: %w", err)
+ }
+ return info, nil
+}
+
+func (r *Runner) UninstallRepoWebhook(ctx context.Context, repoID string) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ if err := poolManager.UninstallWebhook(ctx); err != nil {
+ return fmt.Errorf("error uninstalling webhook: %w", err)
+ }
+ return nil
+}
+
+func (r *Runner) GetRepoWebhookInfo(ctx context.Context, repoID string) (params.HookInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.HookInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ repo, err := r.store.GetRepositoryByID(ctx, repoID)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching repo: %w", err)
+ }
+
+ poolManager, err := r.poolManagerCtrl.GetRepoPoolManager(repo)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error fetching pool manager for repo: %w", err)
+ }
+
+ info, err := poolManager.GetWebhookInfo(ctx)
+ if err != nil {
+ return params.HookInfo{}, fmt.Errorf("error getting webhook info: %w", err)
+ }
+ return info, nil
+}
diff --git a/runner/repositories_test.go b/runner/repositories_test.go
index ba84868b..8f195ae3 100644
--- a/runner/repositories_test.go
+++ b/runner/repositories_test.go
@@ -16,58 +16,72 @@ package runner
import (
"context"
+ "errors"
"fmt"
"testing"
- "github.com/cloudbase/garm/auth"
- "github.com/cloudbase/garm/config"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
"github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
+ "github.com/cloudbase/garm/database/watcher"
garmTesting "github.com/cloudbase/garm/internal/testing"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
runnerCommonMocks "github.com/cloudbase/garm/runner/common/mocks"
runnerMocks "github.com/cloudbase/garm/runner/mocks"
-
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/suite"
)
type RepoTestFixtures struct {
- AdminContext context.Context
- Store dbCommon.Store
- StoreRepos map[string]params.Repository
- Providers map[string]common.Provider
- Credentials map[string]config.Github
- CreateRepoParams params.CreateRepoParams
- CreatePoolParams params.CreatePoolParams
- CreateInstanceParams params.CreateInstanceParams
- UpdateRepoParams params.UpdateRepositoryParams
- UpdatePoolParams params.UpdatePoolParams
- UpdatePoolStateParams params.UpdatePoolStateParams
- ErrMock error
- ProviderMock *runnerCommonMocks.Provider
- PoolMgrMock *runnerCommonMocks.PoolManager
- PoolMgrCtrlMock *runnerMocks.PoolManagerController
+ AdminContext context.Context
+ Store dbCommon.Store
+ StoreRepos map[string]params.Repository
+ Providers map[string]common.Provider
+ Credentials map[string]params.ForgeCredentials
+ CreateRepoParams params.CreateRepoParams
+ CreatePoolParams params.CreatePoolParams
+ CreateInstanceParams params.CreateInstanceParams
+ UpdateRepoParams params.UpdateEntityParams
+ UpdatePoolParams params.UpdatePoolParams
+ ErrMock error
+ ProviderMock *runnerCommonMocks.Provider
+ PoolMgrMock *runnerCommonMocks.PoolManager
+ PoolMgrCtrlMock *runnerMocks.PoolManagerController
+}
+
+func init() {
+ watcher.SetWatcher(&garmTesting.MockWatcher{})
}
type RepoTestSuite struct {
suite.Suite
Fixtures *RepoTestFixtures
Runner *Runner
+
+ testCreds params.ForgeCredentials
+ secondaryTestCreds params.ForgeCredentials
+ giteaTestCreds params.ForgeCredentials
+ githubEndpoint params.ForgeEndpoint
+ giteaEndpoint params.ForgeEndpoint
}
func (s *RepoTestSuite) SetupTest() {
- adminCtx := auth.GetAdminContext()
-
// create testing sqlite database
dbCfg := garmTesting.GetTestSqliteDBConfig(s.T())
- db, err := database.NewDatabase(adminCtx, dbCfg)
+ db, err := database.NewDatabase(context.Background(), dbCfg)
if err != nil {
s.FailNow(fmt.Sprintf("failed to create db connection: %s", err))
}
+ adminCtx := garmTesting.ImpersonateAdminContext(context.Background(), db, s.T())
+ s.githubEndpoint = garmTesting.CreateDefaultGithubEndpoint(adminCtx, db, s.T())
+ s.giteaEndpoint = garmTesting.CreateDefaultGiteaEndpoint(adminCtx, db, s.T())
+ s.testCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "new-creds", db, s.T(), s.githubEndpoint)
+ s.secondaryTestCreds = garmTesting.CreateTestGithubCredentials(adminCtx, "secondary-creds", db, s.T(), s.githubEndpoint)
+ s.giteaTestCreds = garmTesting.CreateTestGiteaCredentials(adminCtx, "gitea-creds", db, s.T(), s.giteaEndpoint)
+
// create some repository objects in the database, for testing purposes
repos := map[string]params.Repository{}
for i := 1; i <= 3; i++ {
@@ -76,11 +90,12 @@ func (s *RepoTestSuite) SetupTest() {
adminCtx,
fmt.Sprintf("test-owner-%v", i),
name,
- fmt.Sprintf("test-creds-%v", i),
+ s.testCreds,
fmt.Sprintf("test-webhook-secret-%v", i),
+ params.PoolBalancerTypeRoundRobin,
)
if err != nil {
- s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%v)", i))
+ s.FailNow(fmt.Sprintf("failed to create database object (test-repo-%v): %q", i, err))
}
repos[name] = repo
}
@@ -90,24 +105,22 @@ func (s *RepoTestSuite) SetupTest() {
var minIdleRunners uint = 20
providerMock := runnerCommonMocks.NewProvider(s.T())
fixtures := &RepoTestFixtures{
- AdminContext: auth.GetAdminContext(),
+ AdminContext: adminCtx,
Store: db,
StoreRepos: repos,
Providers: map[string]common.Provider{
"test-provider": providerMock,
},
- Credentials: map[string]config.Github{
- "test-creds": {
- Name: "test-creds-name",
- Description: "test-creds-description",
- OAuth2Token: "test-creds-oauth2-token",
- },
+ Credentials: map[string]params.ForgeCredentials{
+ s.testCreds.Name: s.testCreds,
+ s.secondaryTestCreds.Name: s.secondaryTestCreds,
},
CreateRepoParams: params.CreateRepoParams{
Owner: "test-owner-create",
Name: "test-repo-create",
- CredentialsName: "test-creds",
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-create-repo-webhook-secret",
+ ForgeType: params.GithubEndpointType,
},
CreatePoolParams: params.CreatePoolParams{
ProviderName: "test-provider",
@@ -117,15 +130,15 @@ func (s *RepoTestSuite) SetupTest() {
Flavor: "test",
OSType: "linux",
OSArch: "arm64",
- Tags: []string{"self-hosted", "arm64", "linux"},
+ Tags: []string{"arm64-linux-runner"},
RunnerBootstrapTimeout: 0,
},
CreateInstanceParams: params.CreateInstanceParams{
Name: "test-instance-name",
OSType: "linux",
},
- UpdateRepoParams: params.UpdateRepositoryParams{
- CredentialsName: "test-creds",
+ UpdateRepoParams: params.UpdateEntityParams{
+ CredentialsName: s.testCreds.Name,
WebhookSecret: "test-update-repo-webhook-secret",
},
UpdatePoolParams: params.UpdatePoolParams{
@@ -134,9 +147,6 @@ func (s *RepoTestSuite) SetupTest() {
Image: "test-images-updated",
Flavor: "test-flavor-updated",
},
- UpdatePoolStateParams: params.UpdatePoolStateParams{
- WebhookSecret: "test-update-repo-webhook-secret",
- },
ErrMock: fmt.Errorf("mock error"),
ProviderMock: providerMock,
PoolMgrMock: runnerCommonMocks.NewPoolManager(s.T()),
@@ -147,7 +157,6 @@ func (s *RepoTestSuite) SetupTest() {
// setup test runner
runner := &Runner{
providers: fixtures.Providers,
- credentials: fixtures.Credentials,
ctx: fixtures.AdminContext,
store: fixtures.Store,
poolManagerCtrl: fixtures.PoolMgrCtrlMock,
@@ -166,10 +175,32 @@ func (s *RepoTestSuite) TestCreateRepository() {
// assertions
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+
s.Require().Nil(err)
s.Require().Equal(s.Fixtures.CreateRepoParams.Owner, repo.Owner)
s.Require().Equal(s.Fixtures.CreateRepoParams.Name, repo.Name)
- s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, repo.PoolBalancerType)
+}
+
+func (s *RepoTestSuite) TestCreateRepositoryPoolBalancerTypePack() {
+ // setup mocks expectations
+ s.Fixtures.PoolMgrMock.On("Start").Return(nil)
+ s.Fixtures.PoolMgrCtrlMock.On("CreateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+
+ // call tested function
+ param := s.Fixtures.CreateRepoParams
+ param.PoolBalancerType = params.PoolBalancerTypePack
+ repo, err := s.Runner.CreateRepository(s.Fixtures.AdminContext, param)
+
+ // assertions
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(param.Owner, repo.Owner)
+ s.Require().Equal(param.Name, repo.Name)
+ s.Require().Equal(s.Fixtures.Credentials[s.Fixtures.CreateRepoParams.CredentialsName].Name, repo.Credentials.Name)
+ s.Require().Equal(params.PoolBalancerTypePack, repo.PoolBalancerType)
}
func (s *RepoTestSuite) TestCreateRepositoryErrUnauthorized() {
@@ -185,7 +216,7 @@ func (s *RepoTestSuite) TestCreateRepositoryEmptyParams() {
}
func (s *RepoTestSuite) TestCreateRepositoryMissingCredentials() {
- s.Fixtures.CreateRepoParams.CredentialsName = "not-existent-creds-name"
+ s.Fixtures.CreateRepoParams.CredentialsName = notExistingCredentialsName
_, err := s.Runner.CreateRepository(s.Fixtures.AdminContext, s.Fixtures.CreateRepoParams)
@@ -209,7 +240,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error creating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestCreateRepositoryStartPoolMgrFailed() {
@@ -221,20 +252,87 @@ func (s *RepoTestSuite) TestCreateRepositoryStartPoolMgrFailed() {
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("starting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error starting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestListRepositories() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
- repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext)
+ repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext, params.RepositoryFilter{})
s.Require().Nil(err)
garmTesting.EqualDBEntityByName(s.T(), garmTesting.DBEntityMapToSlice(s.Fixtures.StoreRepos), repos)
}
+func (s *RepoTestSuite) TestListRepositoriesWithFilters() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ repo, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "example-owner",
+ "example-repo",
+ s.testCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repo2, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "another-example-owner",
+ "example-repo",
+ s.testCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repo3, err := s.Fixtures.Store.CreateRepository(
+ s.Fixtures.AdminContext,
+ "example-owner",
+ "example-repo",
+ s.giteaTestCreds,
+ "test-webhook-secret",
+ params.PoolBalancerTypeRoundRobin,
+ )
+ if err != nil {
+ s.FailNow(fmt.Sprintf("failed to create database object (example-repo): %q", err))
+ }
+
+ repos, err := s.Runner.ListRepositories(s.Fixtures.AdminContext, params.RepositoryFilter{Name: "example-repo"})
+
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo, repo2, repo3}, repos)
+
+ repos, err = s.Runner.ListRepositories(
+ s.Fixtures.AdminContext,
+ params.RepositoryFilter{
+ Name: "example-repo",
+ Owner: "example-owner",
+ },
+ )
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo, repo3}, repos)
+
+ repos, err = s.Runner.ListRepositories(
+ s.Fixtures.AdminContext,
+ params.RepositoryFilter{
+ Name: "example-repo",
+ Owner: "example-owner",
+ Endpoint: s.giteaEndpoint.Name,
+ },
+ )
+ s.Require().Nil(err)
+ garmTesting.EqualDBEntityByName(s.T(), []params.Repository{repo3}, repos)
+}
+
func (s *RepoTestSuite) TestListRepositoriesErrUnauthorized() {
- _, err := s.Runner.ListRepositories(context.Background())
+ _, err := s.Runner.ListRepositories(context.Background(), params.RepositoryFilter{})
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
@@ -257,28 +355,32 @@ func (s *RepoTestSuite) TestGetRepositoryByIDErrUnauthorized() {
func (s *RepoTestSuite) TestDeleteRepository() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(nil)
- err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
s.Require().Nil(err)
_, err = s.Fixtures.Store.GetRepositoryByID(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
- s.Require().Equal("fetching repo: not found", err.Error())
+ s.Require().Equal("error fetching repo: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepositoryErrUnauthorized() {
- err := s.Runner.DeleteRepository(context.Background(), "dummy-repo-id")
+ err := s.Runner.DeleteRepository(context.Background(), "dummy-repo-id", true)
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *RepoTestSuite) TestDeleteRepositoryPoolDefinedFailed() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store repositories pool: %v", err))
}
- err = s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err = s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Require().Equal(runnerErrors.NewBadRequestError("repo has pools defined (%s)", pool.ID), err)
}
@@ -286,64 +388,76 @@ func (s *RepoTestSuite) TestDeleteRepositoryPoolDefinedFailed() {
func (s *RepoTestSuite) TestDeleteRepositoryPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("DeleteRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.ErrMock)
- err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID)
+ err := s.Runner.DeleteRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, true)
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("deleting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error deleting repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestUpdateRepository() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
repo, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Require().Nil(err)
- s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.CredentialsName)
+ s.Require().Equal(s.Fixtures.UpdateRepoParams.CredentialsName, repo.Credentials.Name)
s.Require().Equal(s.Fixtures.UpdateRepoParams.WebhookSecret, repo.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypeRoundRobin, repo.PoolBalancerType)
+}
+
+func (s *RepoTestSuite) TestUpdateRepositoryBalancingType() {
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
+ s.Fixtures.PoolMgrMock.On("Status").Return(params.PoolManagerStatus{IsRunning: true}, nil)
+
+ updateRepoParams := s.Fixtures.UpdateRepoParams
+ updateRepoParams.PoolBalancerType = params.PoolBalancerTypePack
+ repo, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, updateRepoParams)
+
+ s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
+ s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
+ s.Require().Nil(err)
+ s.Require().Equal(updateRepoParams.CredentialsName, repo.Credentials.Name)
+ s.Require().Equal(updateRepoParams.WebhookSecret, repo.WebhookSecret)
+ s.Require().Equal(params.PoolBalancerTypePack, repo.PoolBalancerType)
}
func (s *RepoTestSuite) TestUpdateRepositoryErrUnauthorized() {
_, err := s.Runner.UpdateRepository(context.Background(), "dummy-repo-id", s.Fixtures.UpdateRepoParams)
-
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
func (s *RepoTestSuite) TestUpdateRepositoryInvalidCreds() {
- s.Fixtures.UpdateRepoParams.CredentialsName = "invalid-creds-name"
+ s.Fixtures.UpdateRepoParams.CredentialsName = invalidCredentialsName
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Require().Equal(runnerErrors.NewBadRequestError("invalid credentials (%s) for repo %s/%s", s.Fixtures.UpdateRepoParams.CredentialsName, s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name), err)
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ s.FailNow(fmt.Sprintf("expected error: %v", runnerErrors.ErrNotFound))
+ }
}
func (s *RepoTestSuite) TestUpdateRepositoryPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- s.Fixtures.PoolMgrMock.On("RefreshState", s.Fixtures.UpdatePoolStateParams).Return(s.Fixtures.ErrMock)
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("updating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestUpdateRepositoryCreateRepoPoolMgrFailed() {
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
- s.Fixtures.PoolMgrCtrlMock.On("CreateRepoPoolManager", s.Fixtures.AdminContext, mock.AnythingOfType("params.Repository"), s.Fixtures.Providers, s.Fixtures.Store).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
+ s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
_, err := s.Runner.UpdateRepository(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.UpdateRepoParams)
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(fmt.Sprintf("creating repo pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
+ s.Require().Equal(fmt.Sprintf("error getting pool manager: %s", s.Fixtures.ErrMock.Error()), err.Error())
}
func (s *RepoTestSuite) TestCreateRepoPool() {
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
-
pool, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
@@ -367,30 +481,21 @@ func (s *RepoTestSuite) TestCreateRepoPoolErrUnauthorized() {
s.Require().Equal(runnerErrors.ErrUnauthorized, err)
}
-func (s *RepoTestSuite) TestCreateRepoPoolErrNotFound() {
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
-
- _, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
-
- s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
- s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Equal(runnerErrors.ErrNotFound, err)
-}
-
func (s *RepoTestSuite) TestCreateRepoPoolFetchPoolParamsFailed() {
- s.Fixtures.CreatePoolParams.ProviderName = "not-existent-provider-name"
-
- s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
-
+ s.Fixtures.CreatePoolParams.ProviderName = notExistingProviderName
_, err := s.Runner.CreateRepoPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
- s.Require().Regexp("fetching pool params: no such provider", err.Error())
+ s.Require().Regexp("appending tags to create pool params: no such provider not-existent-provider-name", err.Error())
}
func (s *RepoTestSuite) TestGetRepoPoolByID() {
- repoPool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ repoPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -408,7 +513,11 @@ func (s *RepoTestSuite) TestGetRepoPoolByIDErrUnauthorized() {
}
func (s *RepoTestSuite) TestDeleteRepoPool() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -417,8 +526,8 @@ func (s *RepoTestSuite) TestDeleteRepoPool() {
s.Require().Nil(err)
- _, err = s.Fixtures.Store.GetRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, pool.ID)
- s.Require().Equal("fetching pool: finding pool: not found", err.Error())
+ _, err = s.Fixtures.Store.GetEntityPool(s.Fixtures.AdminContext, entity, pool.ID)
+ s.Require().Equal("fetching pool: error finding pool: not found", err.Error())
}
func (s *RepoTestSuite) TestDeleteRepoPoolErrUnauthorized() {
@@ -428,7 +537,11 @@ func (s *RepoTestSuite) TestDeleteRepoPoolErrUnauthorized() {
}
func (s *RepoTestSuite) TestDeleteRepoPoolRunnersFailed() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -443,10 +556,14 @@ func (s *RepoTestSuite) TestDeleteRepoPoolRunnersFailed() {
}
func (s *RepoTestSuite) TestListRepoPools() {
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
repoPools := []params.Pool{}
for i := 1; i <= 2; i++ {
s.Fixtures.CreatePoolParams.Image = fmt.Sprintf("test-repo-%v", i)
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -466,7 +583,11 @@ func (s *RepoTestSuite) TestListRepoPoolsErrUnauthorized() {
}
func (s *RepoTestSuite) TestListPoolInstances() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -493,7 +614,11 @@ func (s *RepoTestSuite) TestListPoolInstancesErrUnauthorized() {
}
func (s *RepoTestSuite) TestUpdateRepoPool() {
- repoPool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ repoPool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create store repositories pool: %v", err))
}
@@ -512,7 +637,11 @@ func (s *RepoTestSuite) TestUpdateRepoPoolErrUnauthorized() {
}
func (s *RepoTestSuite) TestUpdateRepoPoolMinIdleGreaterThanMax() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %s", err))
}
@@ -527,7 +656,11 @@ func (s *RepoTestSuite) TestUpdateRepoPoolMinIdleGreaterThanMax() {
}
func (s *RepoTestSuite) TestListRepoInstances() {
- pool, err := s.Fixtures.Store.CreateRepositoryPool(s.Fixtures.AdminContext, s.Fixtures.StoreRepos["test-repo-1"].ID, s.Fixtures.CreatePoolParams)
+ entity := params.ForgeEntity{
+ ID: s.Fixtures.StoreRepos["test-repo-1"].ID,
+ EntityType: params.ForgeEntityTypeRepository,
+ }
+ pool, err := s.Fixtures.Store.CreateEntityPool(s.Fixtures.AdminContext, entity, s.Fixtures.CreatePoolParams)
if err != nil {
s.FailNow(fmt.Sprintf("cannot create repo pool: %v", err))
}
@@ -556,7 +689,7 @@ func (s *RepoTestSuite) TestListRepoInstancesErrUnauthorized() {
func (s *RepoTestSuite) TestFindRepoPoolManager() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, nil)
- poolManager, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name)
+ poolManager, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name, s.Fixtures.StoreRepos["test-repo-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
@@ -567,7 +700,7 @@ func (s *RepoTestSuite) TestFindRepoPoolManager() {
func (s *RepoTestSuite) TestFindRepoPoolManagerFetchPoolMgrFailed() {
s.Fixtures.PoolMgrCtrlMock.On("GetRepoPoolManager", mock.AnythingOfType("params.Repository")).Return(s.Fixtures.PoolMgrMock, s.Fixtures.ErrMock)
- _, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name)
+ _, err := s.Runner.findRepoPoolManager(s.Fixtures.StoreRepos["test-repo-1"].Owner, s.Fixtures.StoreRepos["test-repo-1"].Name, s.Fixtures.StoreRepos["test-repo-1"].Endpoint.Name)
s.Fixtures.PoolMgrMock.AssertExpectations(s.T())
s.Fixtures.PoolMgrCtrlMock.AssertExpectations(s.T())
diff --git a/runner/runner.go b/runner/runner.go
index fa28ae66..bf081522 100644
--- a/runner/runner.go
+++ b/runner/runner.go
@@ -17,50 +17,45 @@ package runner
import (
"context"
"crypto/hmac"
- "crypto/sha1"
+ "crypto/sha1" //nolint:golangci-lint,gosec // sha1 is used for github webhooks
"crypto/sha256"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"hash"
- "log"
+ "log/slog"
+ "net/url"
"os"
"strings"
"sync"
"time"
+ "golang.org/x/sync/errgroup"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm-provider-common/util"
"github.com/cloudbase/garm/auth"
"github.com/cloudbase/garm/config"
- "github.com/cloudbase/garm/database"
dbCommon "github.com/cloudbase/garm/database/common"
- runnerErrors "github.com/cloudbase/garm/errors"
"github.com/cloudbase/garm/params"
"github.com/cloudbase/garm/runner/common"
"github.com/cloudbase/garm/runner/pool"
"github.com/cloudbase/garm/runner/providers"
- providerCommon "github.com/cloudbase/garm/runner/providers/common"
- "github.com/cloudbase/garm/util"
-
- "github.com/juju/clock"
- "github.com/juju/retry"
- "github.com/pkg/errors"
- uuid "github.com/satori/go.uuid"
+ "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
)
-func NewRunner(ctx context.Context, cfg config.Config) (*Runner, error) {
- db, err := database.NewDatabase(ctx, cfg.Database)
+func NewRunner(ctx context.Context, cfg config.Config, db dbCommon.Store) (*Runner, error) {
+ ctrlID, err := db.ControllerInfo()
if err != nil {
- return nil, errors.Wrap(err, "creating db connection")
+ return nil, fmt.Errorf("error fetching controller info: %w", err)
}
- ctrlId, err := db.ControllerInfo()
+ providers, err := providers.LoadProvidersFromConfig(ctx, cfg, ctrlID.ControllerID.String())
if err != nil {
- return nil, errors.Wrap(err, "fetching controller info")
- }
-
- providers, err := providers.LoadProvidersFromConfig(ctx, cfg, ctrlId.ControllerID.String())
- if err != nil {
- return nil, errors.Wrap(err, "loading providers")
+ return nil, fmt.Errorf("error loading providers: %w", err)
}
creds := map[string]config.Github{}
@@ -70,9 +65,8 @@ func NewRunner(ctx context.Context, cfg config.Config) (*Runner, error) {
}
poolManagerCtrl := &poolManagerCtrl{
- controllerID: ctrlId.ControllerID.String(),
config: cfg,
- credentials: creds,
+ store: db,
repositories: map[string]common.PoolManager{},
organizations: map[string]common.PoolManager{},
enterprises: map[string]common.PoolManager{},
@@ -83,12 +77,10 @@ func NewRunner(ctx context.Context, cfg config.Config) (*Runner, error) {
store: db,
poolManagerCtrl: poolManagerCtrl,
providers: providers,
- credentials: creds,
- controllerID: ctrlId.ControllerID,
}
if err := runner.loadReposOrgsAndEnterprises(); err != nil {
- return nil, errors.Wrap(err, "loading pool managers")
+ return nil, fmt.Errorf("error loading pool managers: %w", err)
}
return runner, nil
@@ -97,9 +89,8 @@ func NewRunner(ctx context.Context, cfg config.Config) (*Runner, error) {
type poolManagerCtrl struct {
mux sync.Mutex
- controllerID string
- config config.Config
- credentials map[string]config.Github
+ config config.Config
+ store dbCommon.Store
repositories map[string]common.PoolManager
organizations map[string]common.PoolManager
@@ -110,13 +101,18 @@ func (p *poolManagerCtrl) CreateRepoPoolManager(ctx context.Context, repo params
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(repo.CredentialsName)
+ entity, err := repo.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewRepositoryPoolManager(ctx, repo, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating repo pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating repo pool manager: %w", err)
}
p.repositories[repo.ID] = poolManager
return poolManager, nil
@@ -126,7 +122,7 @@ func (p *poolManagerCtrl) GetRepoPoolManager(repo params.Repository) (common.Poo
if repoPoolMgr, ok := p.repositories[repo.ID]; ok {
return repoPoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "repository %s/%s pool manager not loaded", repo.Owner, repo.Name)
+ return nil, fmt.Errorf("repository %s/%s pool manager not loaded: %w", repo.Owner, repo.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteRepoPoolManager(repo params.Repository) error {
@@ -136,7 +132,7 @@ func (p *poolManagerCtrl) DeleteRepoPoolManager(repo params.Repository) error {
poolMgr, ok := p.repositories[repo.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping repo pool manager")
+ return fmt.Errorf("error stopping repo pool manager: %w", err)
}
delete(p.repositories, repo.ID)
}
@@ -151,13 +147,18 @@ func (p *poolManagerCtrl) CreateOrgPoolManager(ctx context.Context, org params.O
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(org.CredentialsName)
+ entity, err := org.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewOrganizationPoolManager(ctx, org, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating org pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating org pool manager: %w", err)
}
p.organizations[org.ID] = poolManager
return poolManager, nil
@@ -167,7 +168,7 @@ func (p *poolManagerCtrl) GetOrgPoolManager(org params.Organization) (common.Poo
if orgPoolMgr, ok := p.organizations[org.ID]; ok {
return orgPoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "organization %s pool manager not loaded", org.Name)
+ return nil, fmt.Errorf("organization %s pool manager not loaded: %w", org.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteOrgPoolManager(org params.Organization) error {
@@ -177,7 +178,7 @@ func (p *poolManagerCtrl) DeleteOrgPoolManager(org params.Organization) error {
poolMgr, ok := p.organizations[org.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping org pool manager")
+ return fmt.Errorf("error stopping org pool manager: %w", err)
}
delete(p.organizations, org.ID)
}
@@ -192,13 +193,18 @@ func (p *poolManagerCtrl) CreateEnterprisePoolManager(ctx context.Context, enter
p.mux.Lock()
defer p.mux.Unlock()
- cfgInternal, err := p.getInternalConfig(enterprise.CredentialsName)
+ entity, err := enterprise.GetEntity()
if err != nil {
- return nil, errors.Wrap(err, "fetching internal config")
+ return nil, fmt.Errorf("error getting entity: %w", err)
}
- poolManager, err := pool.NewEnterprisePoolManager(ctx, enterprise, cfgInternal, providers, store)
+
+ instanceTokenGetter, err := auth.NewInstanceTokenGetter(p.config.JWTAuth.Secret)
if err != nil {
- return nil, errors.Wrap(err, "creating enterprise pool manager")
+ return nil, fmt.Errorf("error creating instance token getter: %w", err)
+ }
+ poolManager, err := pool.NewEntityPoolManager(ctx, entity, instanceTokenGetter, providers, store)
+ if err != nil {
+ return nil, fmt.Errorf("error creating enterprise pool manager: %w", err)
}
p.enterprises[enterprise.ID] = poolManager
return poolManager, nil
@@ -208,7 +214,7 @@ func (p *poolManagerCtrl) GetEnterprisePoolManager(enterprise params.Enterprise)
if enterprisePoolMgr, ok := p.enterprises[enterprise.ID]; ok {
return enterprisePoolMgr, nil
}
- return nil, errors.Wrapf(runnerErrors.ErrNotFound, "enterprise %s pool manager not loaded", enterprise.Name)
+ return nil, fmt.Errorf("enterprise %s pool manager not loaded: %w", enterprise.Name, runnerErrors.ErrNotFound)
}
func (p *poolManagerCtrl) DeleteEnterprisePoolManager(enterprise params.Enterprise) error {
@@ -218,7 +224,7 @@ func (p *poolManagerCtrl) DeleteEnterprisePoolManager(enterprise params.Enterpri
poolMgr, ok := p.enterprises[enterprise.ID]
if ok {
if err := poolMgr.Stop(); err != nil {
- return errors.Wrap(err, "stopping enterprise pool manager")
+ return fmt.Errorf("error stopping enterprise pool manager: %w", err)
}
delete(p.enterprises, enterprise.ID)
}
@@ -229,34 +235,6 @@ func (p *poolManagerCtrl) GetEnterprisePoolManagers() (map[string]common.PoolMan
return p.enterprises, nil
}
-func (p *poolManagerCtrl) getInternalConfig(credsName string) (params.Internal, error) {
- creds, ok := p.credentials[credsName]
- if !ok {
- return params.Internal{}, runnerErrors.NewBadRequestError("invalid credential name (%s)", credsName)
- }
-
- caBundle, err := creds.CACertBundle()
- if err != nil {
- return params.Internal{}, fmt.Errorf("fetching CA bundle for creds: %w", err)
- }
-
- return params.Internal{
- OAuth2Token: creds.OAuth2Token,
- ControllerID: p.controllerID,
- InstanceCallbackURL: p.config.Default.CallbackURL,
- InstanceMetadataURL: p.config.Default.MetadataURL,
- JWTSecret: p.config.JWTAuth.Secret,
- GithubCredentialsDetails: params.GithubCredentials{
- Name: creds.Name,
- Description: creds.Description,
- BaseURL: creds.BaseEndpoint(),
- APIBaseURL: creds.APIEndpoint(),
- UploadBaseURL: creds.UploadEndpoint(),
- CABundle: caBundle,
- },
- }, nil
-}
-
type Runner struct {
mux sync.Mutex
@@ -266,11 +244,24 @@ type Runner struct {
poolManagerCtrl PoolManagerController
- providers map[string]common.Provider
- credentials map[string]config.Github
+ providers map[string]common.Provider
+}
- controllerInfo params.ControllerInfo
- controllerID uuid.UUID
+// UpdateController will update the controller settings.
+func (r *Runner) UpdateController(ctx context.Context, param params.UpdateControllerParams) (params.ControllerInfo, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ControllerInfo{}, runnerErrors.ErrUnauthorized
+ }
+
+ if err := param.Validate(); err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error validating controller update params: %w", err)
+ }
+
+ info, err := r.store.UpdateController(param)
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error updating controller info: %w", err)
+ }
+ return info, nil
}
// GetControllerInfo returns the controller id and the hostname.
@@ -288,45 +279,34 @@ func (r *Runner) GetControllerInfo(ctx context.Context) (params.ControllerInfo,
// As a side note, Windows requires a reboot for the hostname change to take effect,
// so if we'll ever support Windows as a target system, the hostname can be cached.
var hostname string
- err := retry.Call(retry.CallArgs{
- Func: func() error {
- var err error
- hostname, err = os.Hostname()
- if err != nil {
- return errors.Wrap(err, "fetching hostname")
+ var err error
+ for range 10 {
+ hostname, err = os.Hostname()
+ if err != nil {
+ select {
+ case <-time.After(10 * time.Millisecond):
+ continue
+ case <-ctx.Done():
}
- return nil
- },
- Attempts: 10,
- Delay: 100 * time.Millisecond,
- Clock: clock.WallClock,
- })
+ return params.ControllerInfo{}, fmt.Errorf("error fetching hostname: %w", err)
+ }
+ break
+ }
if err != nil {
- return params.ControllerInfo{}, errors.Wrap(err, "fetching hostname")
+ return params.ControllerInfo{}, fmt.Errorf("error fetching hostname: %w", err)
}
- r.controllerInfo.Hostname = hostname
- return params.ControllerInfo{
- ControllerID: r.controllerID,
- Hostname: hostname,
- }, nil
-}
-func (r *Runner) ListCredentials(ctx context.Context) ([]params.GithubCredentials, error) {
- if !auth.IsAdmin(ctx) {
- return nil, runnerErrors.ErrUnauthorized
+ info, err := r.store.ControllerInfo()
+ if err != nil {
+ return params.ControllerInfo{}, fmt.Errorf("error fetching controller info: %w", err)
}
- ret := []params.GithubCredentials{}
- for _, val := range r.config.Github {
- ret = append(ret, params.GithubCredentials{
- Name: val.Name,
- Description: val.Description,
- BaseURL: val.BaseEndpoint(),
- APIBaseURL: val.APIEndpoint(),
- UploadBaseURL: val.UploadEndpoint(),
- })
- }
- return ret, nil
+ // This is temporary. Right now, GARM is a single-instance deployment. When we add the
+ // ability to scale out, the hostname field will be moved form here to a dedicated node
+ // object. As a single controller will be made up of multiple nodes, we will need to model
+ // that aspect of GARM.
+ info.Hostname = hostname
+ return info, nil
}
func (r *Runner) ListProviders(ctx context.Context) ([]params.Provider, error) {
@@ -345,59 +325,54 @@ func (r *Runner) loadReposOrgsAndEnterprises() error {
r.mux.Lock()
defer r.mux.Unlock()
- repos, err := r.store.ListRepositories(r.ctx)
+ repos, err := r.store.ListRepositories(r.ctx, params.RepositoryFilter{})
if err != nil {
- return errors.Wrap(err, "fetching repositories")
+ return fmt.Errorf("error fetching repositories: %w", err)
}
- orgs, err := r.store.ListOrganizations(r.ctx)
+ orgs, err := r.store.ListOrganizations(r.ctx, params.OrganizationFilter{})
if err != nil {
- return errors.Wrap(err, "fetching organizations")
+ return fmt.Errorf("error fetching organizations: %w", err)
}
- enterprises, err := r.store.ListEnterprises(r.ctx)
+ enterprises, err := r.store.ListEnterprises(r.ctx, params.EnterpriseFilter{})
if err != nil {
- return errors.Wrap(err, "fetching enterprises")
+ return fmt.Errorf("error fetching enterprises: %w", err)
}
- expectedReplies := len(repos) + len(orgs) + len(enterprises)
- errChan := make(chan error, expectedReplies)
-
+ g, _ := errgroup.WithContext(r.ctx)
for _, repo := range repos {
- go func(repo params.Repository) {
- log.Printf("creating pool manager for repo %s/%s", repo.Owner, repo.Name)
+ repo := repo
+ g.Go(func() error {
+ slog.InfoContext(
+ r.ctx, "creating pool manager for repo",
+ "repo_owner", repo.Owner, "repo_name", repo.Name)
_, err := r.poolManagerCtrl.CreateRepoPoolManager(r.ctx, repo, r.providers, r.store)
- errChan <- err
- }(repo)
+ return err
+ })
}
for _, org := range orgs {
- go func(org params.Organization) {
- log.Printf("creating pool manager for organization %s", org.Name)
+ org := org
+ g.Go(func() error {
+ slog.InfoContext(r.ctx, "creating pool manager for organization", "org_name", org.Name)
_, err := r.poolManagerCtrl.CreateOrgPoolManager(r.ctx, org, r.providers, r.store)
- errChan <- err
- }(org)
+ return err
+ })
}
for _, enterprise := range enterprises {
- go func(enterprise params.Enterprise) {
- log.Printf("creating pool manager for enterprise %s", enterprise.Name)
+ enterprise := enterprise
+ g.Go(func() error {
+ slog.InfoContext(r.ctx, "creating pool manager for enterprise", "enterprise_name", enterprise.Name)
_, err := r.poolManagerCtrl.CreateEnterprisePoolManager(r.ctx, enterprise, r.providers, r.store)
- errChan <- err
- }(enterprise)
+ return err
+ })
}
- for i := 0; i < expectedReplies; i++ {
- select {
- case err := <-errChan:
- if err != nil {
- return errors.Wrap(err, "failed to load pool managers for repos and orgs")
- }
- case <-time.After(60 * time.Second):
- return fmt.Errorf("timed out waiting for pool manager load")
- }
+ if err := r.waitForErrorGroupOrTimeout(g); err != nil {
+ return fmt.Errorf("failed to create pool managers: %w", err)
}
-
return nil
}
@@ -407,123 +382,122 @@ func (r *Runner) Start() error {
repositories, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetch repo pool managers: %w", err)
}
organizations, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetch org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetch enterprise pool managers: %w", err)
}
- expectedReplies := len(repositories) + len(organizations) + len(enterprises)
- errChan := make(chan error, expectedReplies)
-
+ g, _ := errgroup.WithContext(r.ctx)
for _, repo := range repositories {
- go func(repo common.PoolManager) {
- err := repo.Start()
- errChan <- err
- }(repo)
+ repo := repo
+ g.Go(func() error {
+ return repo.Start()
+ })
}
for _, org := range organizations {
- go func(org common.PoolManager) {
- err := org.Start()
- errChan <- err
- }(org)
+ org := org
+ g.Go(func() error {
+ return org.Start()
+ })
}
for _, enterprise := range enterprises {
- go func(org common.PoolManager) {
- err := org.Start()
- errChan <- err
- }(enterprise)
+ enterprise := enterprise
+ g.Go(func() error {
+ return enterprise.Start()
+ })
}
- for i := 0; i < expectedReplies; i++ {
- select {
- case err := <-errChan:
- if err != nil {
- return errors.Wrap(err, "starting pool manager")
- }
- case <-time.After(60 * time.Second):
- return fmt.Errorf("timed out waiting for pool mamager start")
- }
+ if err := r.waitForErrorGroupOrTimeout(g); err != nil {
+ return fmt.Errorf("failed to start pool managers: %w", err)
}
return nil
}
+func (r *Runner) waitForErrorGroupOrTimeout(g *errgroup.Group) error {
+ if g == nil {
+ return nil
+ }
+
+ done := make(chan error, 1)
+ go func() {
+ done <- g.Wait()
+ }()
+ timer := time.NewTimer(60 * time.Second)
+ defer timer.Stop()
+ select {
+ case err := <-done:
+ return err
+ case <-timer.C:
+ return fmt.Errorf("timed out waiting for pool manager start")
+ }
+}
+
func (r *Runner) Stop() error {
r.mux.Lock()
defer r.mux.Unlock()
repos, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetching repo pool managers: %w", err)
}
orgs, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetching org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetching enterprise pool managers: %w", err)
}
- expectedReplies := len(repos) + len(orgs) + len(enterprises)
- errChan := make(chan error, expectedReplies)
+ g, _ := errgroup.WithContext(r.ctx)
for _, repo := range repos {
- go func(poolMgr common.PoolManager) {
+ poolMgr := repo
+ g.Go(func() error {
err := poolMgr.Stop()
if err != nil {
- errChan <- err
- return
+ return fmt.Errorf("failed to stop repo pool manager: %w", err)
}
- err = poolMgr.Wait()
- errChan <- err
- }(repo)
+ return poolMgr.Wait()
+ })
}
for _, org := range orgs {
- go func(poolMgr common.PoolManager) {
+ poolMgr := org
+ g.Go(func() error {
err := poolMgr.Stop()
if err != nil {
- errChan <- err
- return
+ return fmt.Errorf("failed to stop org pool manager: %w", err)
}
- err = poolMgr.Wait()
- errChan <- err
- }(org)
+ return poolMgr.Wait()
+ })
}
for _, enterprise := range enterprises {
- go func(poolMgr common.PoolManager) {
+ poolMgr := enterprise
+ g.Go(func() error {
err := poolMgr.Stop()
if err != nil {
- errChan <- err
- return
+ return fmt.Errorf("failed to stop enterprise pool manager: %w", err)
}
- err = poolMgr.Wait()
- errChan <- err
- }(enterprise)
+ return poolMgr.Wait()
+ })
}
- for i := 0; i < expectedReplies; i++ {
- select {
- case err := <-errChan:
- if err != nil {
- return errors.Wrap(err, "stopping pool manager")
- }
- case <-time.After(60 * time.Second):
- return fmt.Errorf("timed out waiting for pool mamager stop")
- }
+ if err := r.waitForErrorGroupOrTimeout(g); err != nil {
+ return fmt.Errorf("failed to stop pool managers: %w", err)
}
return nil
}
@@ -536,47 +510,47 @@ func (r *Runner) Wait() error {
repos, err := r.poolManagerCtrl.GetRepoPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch repo pool managers")
+ return fmt.Errorf("error fetching repo pool managers: %w", err)
}
orgs, err := r.poolManagerCtrl.GetOrgPoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch org pool managers")
+ return fmt.Errorf("error fetching org pool managers: %w", err)
}
enterprises, err := r.poolManagerCtrl.GetEnterprisePoolManagers()
if err != nil {
- return errors.Wrap(err, "fetch enterprise pool managers")
+ return fmt.Errorf("error fetching enterprise pool managers: %w", err)
}
- for poolId, repo := range repos {
+ for poolID, repo := range repos {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id, "pool_mgr_id", poolMgr.ID())
}
- }(poolId, repo)
+ }(poolID, repo)
}
- for poolId, org := range orgs {
+ for poolID, org := range orgs {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id)
}
- }(poolId, org)
+ }(poolID, org)
}
- for poolId, enterprise := range enterprises {
+ for poolID, enterprise := range enterprises {
wg.Add(1)
go func(id string, poolMgr common.PoolManager) {
defer wg.Done()
if err := poolMgr.Wait(); err != nil {
- log.Printf("timed out waiting for pool manager %s to exit", id)
+ slog.With(slog.Any("error", err)).ErrorContext(r.ctx, "timed out waiting for pool manager to exit", "pool_id", id)
}
- }(poolId, enterprise)
+ }(poolID, enterprise)
}
wg.Wait()
@@ -615,7 +589,7 @@ func (r *Runner) validateHookBody(signature, secret string, body []byte) error {
mac := hmac.New(hashFunc, []byte(secret))
_, err := mac.Write(body)
if err != nil {
- return errors.Wrap(err, "failed to compute sha256")
+ return fmt.Errorf("failed to compute sha256: %w", err)
}
expectedMAC := hex.EncodeToString(mac.Sum(nil))
@@ -626,47 +600,105 @@ func (r *Runner) validateHookBody(signature, secret string, body []byte) error {
return nil
}
-func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, jobData []byte) error {
+func (r *Runner) findEndpointForJob(job params.WorkflowJob, forgeType params.EndpointType) (params.ForgeEndpoint, error) {
+ uri, err := url.ParseRequestURI(job.WorkflowJob.HTMLURL)
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error parsing job URL: %w", err)
+ }
+ baseURI := fmt.Sprintf("%s://%s", uri.Scheme, uri.Host)
+
+ // Note(gabriel-samfira): Endpoints should be cached. We don't expect to have a large number
+ // of endpoints. In most cases there will be just one (github.com). In cases where there is
+ // a GHES involved, those users will have just one extra endpoint or 2 (if they also have a
+ // test env). But there should be a relatively small number, regardless. So we don't really care
+ // that much about the performance of this function.
+ var endpoints []params.ForgeEndpoint
+ switch forgeType {
+ case params.GithubEndpointType:
+ endpoints, err = r.store.ListGithubEndpoints(r.ctx)
+ case params.GiteaEndpointType:
+ endpoints, err = r.store.ListGiteaEndpoints(r.ctx)
+ default:
+ return params.ForgeEndpoint{}, runnerErrors.NewBadRequestError("unknown forge type %s", forgeType)
+ }
+
+ if err != nil {
+ return params.ForgeEndpoint{}, fmt.Errorf("error fetching github endpoints: %w", err)
+ }
+ for _, ep := range endpoints {
+ slog.DebugContext(r.ctx, "checking endpoint", "base_uri", baseURI, "endpoint", ep.BaseURL)
+ epBaseURI := strings.TrimSuffix(ep.BaseURL, "/")
+ if epBaseURI == baseURI {
+ return ep, nil
+ }
+ }
+
+ return params.ForgeEndpoint{}, runnerErrors.NewNotFoundError("no endpoint found for job")
+}
+
+func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, forgeType params.EndpointType, jobData []byte) error {
if len(jobData) == 0 {
+ slog.ErrorContext(r.ctx, "missing job data")
return runnerErrors.NewBadRequestError("missing job data")
}
var job params.WorkflowJob
if err := json.Unmarshal(jobData, &job); err != nil {
- return errors.Wrapf(runnerErrors.ErrBadRequest, "invalid job data: %s", err)
+ slog.ErrorContext(r.ctx, "failed to unmarshal job data", "error", err)
+ return fmt.Errorf("invalid job data %s: %w", err, runnerErrors.ErrBadRequest)
+ }
+
+ endpoint, err := r.findEndpointForJob(job, forgeType)
+ if err != nil {
+ slog.ErrorContext(r.ctx, "failed to find endpoint for job", "error", err)
+ return fmt.Errorf("error finding endpoint for job: %w", err)
}
var poolManager common.PoolManager
- var err error
switch HookTargetType(hookTargetType) {
case RepoHook:
- log.Printf("got hook for repo %s/%s", util.SanitizeLogEntry(job.Repository.Owner.Login), util.SanitizeLogEntry(job.Repository.Name))
- poolManager, err = r.findRepoPoolManager(job.Repository.Owner.Login, job.Repository.Name)
+ slog.DebugContext(
+ r.ctx, "got hook for repo",
+ "repo_owner", util.SanitizeLogEntry(job.Repository.Owner.Login),
+ "repo_name", util.SanitizeLogEntry(job.Repository.Name),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findRepoPoolManager(job.Repository.Owner.Login, job.Repository.Name, endpoint.Name)
case OrganizationHook:
- log.Printf("got hook for org %s", util.SanitizeLogEntry(job.Organization.Login))
- poolManager, err = r.findOrgPoolManager(job.Organization.Login)
+ slog.DebugContext(
+ r.ctx, "got hook for organization",
+ "organization", util.SanitizeLogEntry(job.GetOrgName(forgeType)),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findOrgPoolManager(job.GetOrgName(forgeType), endpoint.Name)
case EnterpriseHook:
- poolManager, err = r.findEnterprisePoolManager(job.Enterprise.Slug)
+ slog.DebugContext(
+ r.ctx, "got hook for enterprise",
+ "enterprise", util.SanitizeLogEntry(job.Enterprise.Slug),
+ "endpoint", endpoint.Name)
+ poolManager, err = r.findEnterprisePoolManager(job.Enterprise.Slug, endpoint.Name)
default:
return runnerErrors.NewBadRequestError("cannot handle hook target type %s", hookTargetType)
}
+ slog.DebugContext(r.ctx, "found pool manager", "pool_manager", poolManager.ID())
if err != nil {
+ slog.ErrorContext(r.ctx, "failed to find pool manager", "error", err, "hook_target_type", hookTargetType)
// We don't have a repository or organization configured that
// can handle this workflow job.
- return errors.Wrap(err, "fetching poolManager")
+ return fmt.Errorf("error fetching poolManager: %w", err)
}
// We found a pool. Validate the webhook job. If a secret is configured,
// we make sure that the source of this workflow job is valid.
secret := poolManager.WebhookSecret()
if err := r.validateHookBody(signature, secret, jobData); err != nil {
- return errors.Wrap(err, "validating webhook data")
+ slog.ErrorContext(r.ctx, "failed to validate webhook data", "error", err)
+ return fmt.Errorf("error validating webhook data: %w", err)
}
if err := poolManager.HandleWorkflowJob(job); err != nil {
- return errors.Wrap(err, "handling workflow job")
+ slog.ErrorContext(r.ctx, "failed to handle workflow job", "error", err)
+ return fmt.Errorf("error handling workflow job: %w", err)
}
return nil
@@ -674,7 +706,8 @@ func (r *Runner) DispatchWorkflowJob(hookTargetType, signature string, jobData [
func (r *Runner) appendTagsToCreatePoolParams(param params.CreatePoolParams) (params.CreatePoolParams, error) {
if err := param.Validate(); err != nil {
- return params.CreatePoolParams{}, errors.Wrapf(runnerErrors.ErrBadRequest, "validating params: %s", err)
+ return params.CreatePoolParams{}, fmt.Errorf("failed to validate params (%q): %w", err, runnerErrors.ErrBadRequest)
+ // errors.Wrapf(runnerErrors.ErrBadRequest, "validating params: %s", err)
}
if !IsSupportedOSType(param.OSType) {
@@ -690,57 +723,17 @@ func (r *Runner) appendTagsToCreatePoolParams(param params.CreatePoolParams) (pa
return params.CreatePoolParams{}, runnerErrors.NewBadRequestError("no such provider %s", param.ProviderName)
}
- newTags, err := r.processTags(string(param.OSArch), param.OSType, param.Tags)
- if err != nil {
- return params.CreatePoolParams{}, errors.Wrap(err, "processing tags")
- }
-
- param.Tags = newTags
-
return param, nil
}
-func (r *Runner) processTags(osArch string, osType params.OSType, tags []string) ([]string, error) {
- // github automatically adds the "self-hosted" tag as well as the OS type (linux, windows, etc)
- // and architecture (arm, x64, etc) to all self hosted runners. When a workflow job comes in, we try
- // to find a pool based on the labels that are set in the workflow. If we don't explicitly define these
- // default tags for each pool, and the user targets these labels, we won't be able to match any pools.
- // The downside is that all pools with the same OS and arch will have these default labels. Users should
- // set distinct and unique labels on each pool, and explicitly target those labels, or risk assigning
- // the job to the wrong worker type.
- ghArch, err := util.ResolveToGithubArch(osArch)
- if err != nil {
- return nil, errors.Wrap(err, "invalid arch")
- }
-
- ghOSType, err := util.ResolveToGithubTag(osType)
- if err != nil {
- return nil, errors.Wrap(err, "invalid os type")
- }
-
- labels := []string{
- "self-hosted",
- ghArch,
- ghOSType,
- }
-
- for _, val := range tags {
- if val != "self-hosted" && val != ghArch && val != ghOSType {
- labels = append(labels, val)
- }
- }
-
- return labels, nil
-}
-
func (r *Runner) GetInstance(ctx context.Context, instanceName string) (params.Instance, error) {
if !auth.IsAdmin(ctx) {
return params.Instance{}, runnerErrors.ErrUnauthorized
}
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
+ instance, err := r.store.GetInstance(ctx, instanceName)
if err != nil {
- return params.Instance{}, errors.Wrap(err, "fetching instance")
+ return params.Instance{}, fmt.Errorf("error fetching instance: %w", err)
}
return instance, nil
}
@@ -752,19 +745,19 @@ func (r *Runner) ListAllInstances(ctx context.Context) ([]params.Instance, error
instances, err := r.store.ListAllInstances(ctx)
if err != nil {
- return nil, errors.Wrap(err, "fetching instances")
+ return nil, fmt.Errorf("error fetching instances: %w", err)
}
return instances, nil
}
func (r *Runner) AddInstanceStatusMessage(ctx context.Context, param params.InstanceUpdateMessage) error {
- instanceID := auth.InstanceID(ctx)
- if instanceID == "" {
+ instanceName := auth.InstanceName(ctx)
+ if instanceName == "" {
return runnerErrors.ErrUnauthorized
}
- if err := r.store.AddInstanceEvent(ctx, instanceID, params.StatusEvent, params.EventInfo, param.Message); err != nil {
- return errors.Wrap(err, "adding status update")
+ if err := r.store.AddInstanceEvent(ctx, instanceName, params.StatusEvent, params.EventInfo, param.Message); err != nil {
+ return fmt.Errorf("error adding status update: %w", err)
}
updateParams := params.UpdateInstanceParams{
@@ -775,125 +768,200 @@ func (r *Runner) AddInstanceStatusMessage(ctx context.Context, param params.Inst
updateParams.AgentID = *param.AgentID
}
- if _, err := r.store.UpdateInstance(r.ctx, instanceID, updateParams); err != nil {
- return errors.Wrap(err, "updating runner state")
+ if _, err := r.store.UpdateInstance(r.ctx, instanceName, updateParams); err != nil {
+ return fmt.Errorf("error updating runner agent ID: %w", err)
}
return nil
}
-func (r *Runner) GetInstanceGithubRegistrationToken(ctx context.Context) (string, error) {
+func (r *Runner) UpdateSystemInfo(ctx context.Context, param params.UpdateSystemInfoParams) error {
instanceName := auth.InstanceName(ctx)
if instanceName == "" {
- return "", runnerErrors.ErrUnauthorized
+ slog.ErrorContext(ctx, "missing instance name")
+ return runnerErrors.ErrUnauthorized
}
- // Check if this instance already fetched a registration token. We only allow an instance to
- // fetch one token. If the instance fails to bootstrap after a token is fetched, we reset the
- // token fetched field when re-queueing the instance.
- if auth.InstanceTokenFetched(ctx) {
- return "", runnerErrors.ErrUnauthorized
+ if param.OSName == "" && param.OSVersion == "" && param.AgentID == nil {
+ // Nothing to update
+ return nil
}
- status := auth.InstanceRunnerStatus(ctx)
- if status != providerCommon.RunnerPending && status != providerCommon.RunnerInstalling {
- return "", runnerErrors.ErrUnauthorized
- }
-
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
- if err != nil {
- return "", errors.Wrap(err, "fetching instance")
- }
-
- poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
- if err != nil {
- return "", errors.Wrap(err, "fetching pool manager for instance")
- }
-
- token, err := poolMgr.GithubRunnerRegistrationToken()
- if err != nil {
- return "", errors.Wrap(err, "fetching runner token")
- }
-
- tokenFetched := true
updateParams := params.UpdateInstanceParams{
- TokenFetched: &tokenFetched,
+ OSName: param.OSName,
+ OSVersion: param.OSVersion,
}
- if _, err := r.store.UpdateInstance(r.ctx, instance.ID, updateParams); err != nil {
- return "", errors.Wrap(err, "setting token_fetched for instance")
+ if param.AgentID != nil {
+ updateParams.AgentID = *param.AgentID
}
- if err := r.store.AddInstanceEvent(ctx, instance.ID, params.FetchTokenEvent, params.EventInfo, "runner registration token was retrieved"); err != nil {
- return "", errors.Wrap(err, "recording event")
+ if _, err := r.store.UpdateInstance(r.ctx, instanceName, updateParams); err != nil {
+ return fmt.Errorf("error updating runner system info: %w", err)
}
- return token, nil
+ return nil
}
func (r *Runner) getPoolManagerFromInstance(ctx context.Context, instance params.Instance) (common.PoolManager, error) {
pool, err := r.store.GetPoolByID(ctx, instance.PoolID)
if err != nil {
- return nil, errors.Wrap(err, "fetching pool")
+ return nil, fmt.Errorf("error fetching pool: %w", err)
}
var poolMgr common.PoolManager
- if pool.RepoID != "" {
+ switch {
+ case pool.RepoID != "":
repo, err := r.store.GetRepositoryByID(ctx, pool.RepoID)
if err != nil {
- return nil, errors.Wrap(err, "fetching repo")
+ return nil, fmt.Errorf("error fetching repo: %w", err)
}
- poolMgr, err = r.findRepoPoolManager(repo.Owner, repo.Name)
+ poolMgr, err = r.findRepoPoolManager(repo.Owner, repo.Name, repo.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for repo %s", pool.RepoName)
+ return nil, fmt.Errorf("error fetching pool manager for repo %s: %w", pool.RepoName, err)
}
- } else if pool.OrgID != "" {
+ case pool.OrgID != "":
org, err := r.store.GetOrganizationByID(ctx, pool.OrgID)
if err != nil {
- return nil, errors.Wrap(err, "fetching org")
+ return nil, fmt.Errorf("error fetching org: %w", err)
}
- poolMgr, err = r.findOrgPoolManager(org.Name)
+ poolMgr, err = r.findOrgPoolManager(org.Name, org.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for org %s", pool.OrgName)
+ return nil, fmt.Errorf("error fetching pool manager for org %s: %w", pool.OrgName, err)
}
- } else if pool.EnterpriseID != "" {
+ case pool.EnterpriseID != "":
enterprise, err := r.store.GetEnterpriseByID(ctx, pool.EnterpriseID)
if err != nil {
- return nil, errors.Wrap(err, "fetching enterprise")
+ return nil, fmt.Errorf("error fetching enterprise: %w", err)
}
- poolMgr, err = r.findEnterprisePoolManager(enterprise.Name)
+ poolMgr, err = r.findEnterprisePoolManager(enterprise.Name, enterprise.Endpoint.Name)
if err != nil {
- return nil, errors.Wrapf(err, "fetching pool manager for enterprise %s", pool.EnterpriseName)
+ return nil, fmt.Errorf("error fetching pool manager for enterprise %s: %w", pool.EnterpriseName, err)
}
}
return poolMgr, nil
}
-func (r *Runner) ForceDeleteRunner(ctx context.Context, instanceName string) error {
+// DeleteRunner removes a runner from a pool. If forceDelete is true, GARM will ignore any provider errors
+// that may occur, and attempt to remove the runner from GitHub and then the database, regardless of provider
+// errors.
+func (r *Runner) DeleteRunner(ctx context.Context, instanceName string, forceDelete, bypassGithubUnauthorized bool) error {
if !auth.IsAdmin(ctx) {
return runnerErrors.ErrUnauthorized
}
- instance, err := r.store.GetInstanceByName(ctx, instanceName)
+ instance, err := r.store.GetInstance(ctx, instanceName)
if err != nil {
- return errors.Wrap(err, "fetching instance")
+ return fmt.Errorf("error fetching instance: %w", err)
}
switch instance.Status {
- case providerCommon.InstanceRunning, providerCommon.InstanceError:
+ case commonParams.InstanceRunning, commonParams.InstanceError,
+ commonParams.InstancePendingForceDelete, commonParams.InstancePendingDelete:
default:
- return runnerErrors.NewBadRequestError("runner must be in %q or %q state", providerCommon.InstanceRunning, providerCommon.InstanceError)
+ validStates := []string{
+ string(commonParams.InstanceRunning),
+ string(commonParams.InstanceError),
+ string(commonParams.InstancePendingForceDelete),
+ string(commonParams.InstancePendingDelete),
+ }
+ return runnerErrors.NewBadRequestError("runner must be in one of the following states: %q", strings.Join(validStates, ", "))
}
- poolMgr, err := r.getPoolManagerFromInstance(ctx, instance)
+ ghCli, ssCli, err := r.getGHCliFromInstance(ctx, instance)
if err != nil {
- return errors.Wrap(err, "fetching pool manager for instance")
+ return fmt.Errorf("error fetching github client: %w", err)
}
- if err := poolMgr.ForceDeleteRunner(instance); err != nil {
- return errors.Wrap(err, "removing runner")
+ if instance.AgentID != 0 {
+ switch {
+ case instance.ScaleSetID != 0:
+ err = ssCli.RemoveRunner(ctx, instance.AgentID)
+ case instance.PoolID != "":
+ err = ghCli.RemoveEntityRunner(ctx, instance.AgentID)
+ default:
+ return errors.New("instance does not have a pool or scale set")
+ }
+
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ if errors.Is(err, runnerErrors.ErrUnauthorized) && instance.PoolID != "" {
+					poolMgr, mgrErr := r.getPoolManagerFromInstance(ctx, instance)
+					if mgrErr != nil {
+						return fmt.Errorf("error fetching pool manager for instance: %w", mgrErr)
+					}
+					poolMgr.SetPoolRunningState(false, fmt.Sprintf("failed to remove runner: %q", err)) // err is the github removal error, not mgrErr
+ }
+ if !bypassGithubUnauthorized {
+ return fmt.Errorf("error removing runner from github: %w", err)
+ }
+ }
+ }
}
+
+ instanceStatus := commonParams.InstancePendingDelete
+ if forceDelete {
+ instanceStatus = commonParams.InstancePendingForceDelete
+ }
+
+ slog.InfoContext(
+ r.ctx, "setting instance status",
+ "runner_name", instance.Name,
+ "status", instanceStatus)
+
+ updateParams := params.UpdateInstanceParams{
+ Status: instanceStatus,
+ }
+ _, err = r.store.UpdateInstance(r.ctx, instance.Name, updateParams)
+ if err != nil {
+ return fmt.Errorf("error updating runner state: %w", err)
+ }
+
return nil
}
+
+func (r *Runner) getGHCliFromInstance(ctx context.Context, instance params.Instance) (common.GithubClient, *scalesets.ScaleSetClient, error) {
+ // nolint:golangci-lint,godox
+ // TODO(gabriel-samfira): We can probably cache the entity.
+ var entityGetter params.EntityGetter
+ var err error
+
+ switch {
+ case instance.PoolID != "":
+ entityGetter, err = r.store.GetPoolByID(ctx, instance.PoolID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching pool: %w", err)
+ }
+ case instance.ScaleSetID != 0:
+ entityGetter, err = r.store.GetScaleSetByID(ctx, instance.ScaleSetID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching scale set: %w", err)
+ }
+ default:
+ return nil, nil, errors.New("instance does not have a pool or scale set")
+ }
+
+ entity, err := entityGetter.GetEntity()
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ // Fetching the entity from the database will populate all fields, including credentials.
+ entity, err = r.store.GetForgeEntity(ctx, entity.EntityType, entity.ID)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error fetching entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scaleSetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating scaleset client: %w", err)
+ }
+ return ghCli, scaleSetCli, nil
+}
diff --git a/runner/scalesets.go b/runner/scalesets.go
new file mode 100644
index 00000000..136ddec2
--- /dev/null
+++ b/runner/scalesets.go
@@ -0,0 +1,297 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package runner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/auth"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/util/appdefaults"
+ "github.com/cloudbase/garm/util/github"
+ "github.com/cloudbase/garm/util/github/scalesets"
+)
+
+func (r *Runner) ListAllScaleSets(ctx context.Context) ([]params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return []params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ scalesets, err := r.store.ListAllScaleSets(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching pools: %w", err)
+ }
+ return scalesets, nil
+}
+
+func (r *Runner) GetScaleSetByID(ctx context.Context, scaleSet uint) (params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ set, err := r.store.GetScaleSetByID(ctx, scaleSet)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+ return set, nil
+}
+
+func (r *Runner) DeleteScaleSetByID(ctx context.Context, scaleSetID uint) error {
+ if !auth.IsAdmin(ctx) {
+ return runnerErrors.ErrUnauthorized
+ }
+
+ scaleSet, err := r.store.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ if !errors.Is(err, runnerErrors.ErrNotFound) {
+ return fmt.Errorf("error fetching scale set: %w", err)
+ }
+ return nil
+ }
+
+ if len(scaleSet.Instances) > 0 {
+ return runnerErrors.NewBadRequestError("scale set has runners")
+ }
+
+ if scaleSet.Enabled {
+ return runnerErrors.NewBadRequestError("scale set is enabled; disable it first")
+ }
+
+ paramEntity, err := scaleSet.GetEntity()
+ if err != nil {
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, paramEntity.EntityType, paramEntity.ID)
+ if err != nil {
+ return fmt.Errorf("error getting entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
+ slog.DebugContext(ctx, "deleting scale set", "scale_set_id", scaleSet.ScaleSetID)
+	if err := scalesetCli.DeleteRunnerScaleSet(ctx, scaleSet.ScaleSetID); err != nil {
+		if !errors.Is(err, runnerErrors.ErrNotFound) {
+			slog.With(slog.Any("error", err)).ErrorContext(ctx, "failed to delete scale set from github")
+			return fmt.Errorf("error deleting scale set from github: %w", err)
+		}
+		// Already gone upstream; fall through and remove the DB record below.
+		slog.InfoContext(ctx, "scale set not found", "scale_set_id", scaleSet.ScaleSetID)
+	}
+ if err := r.store.DeleteScaleSetByID(ctx, scaleSetID); err != nil {
+ return fmt.Errorf("error deleting scale set: %w", err)
+ }
+ return nil
+}
+
+func (r *Runner) UpdateScaleSetByID(ctx context.Context, scaleSetID uint, param params.UpdateScaleSetParams) (params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ scaleSet, err := r.store.GetScaleSetByID(ctx, scaleSetID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error fetching scale set: %w", err)
+ }
+
+ maxRunners := scaleSet.MaxRunners
+ minIdleRunners := scaleSet.MinIdleRunners
+
+ if param.MaxRunners != nil {
+ maxRunners = *param.MaxRunners
+ }
+ if param.MinIdleRunners != nil {
+ minIdleRunners = *param.MinIdleRunners
+ }
+
+ if param.RunnerBootstrapTimeout != nil && *param.RunnerBootstrapTimeout == 0 {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("runner_bootstrap_timeout cannot be 0")
+ }
+
+ if minIdleRunners > maxRunners {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("min_idle_runners cannot be larger than max_runners")
+ }
+
+ paramEntity, err := scaleSet.GetEntity()
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, paramEntity.EntityType, paramEntity.ID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
+ callback := func(old, newSet params.ScaleSet) error {
+ updateParams := params.RunnerScaleSet{}
+ hasUpdates := false
+ if old.Name != newSet.Name {
+ updateParams.Name = newSet.Name
+ hasUpdates = true
+ }
+
+ if old.GitHubRunnerGroup != newSet.GitHubRunnerGroup {
+ runnerGroup, err := scalesetCli.GetRunnerGroupByName(ctx, newSet.GitHubRunnerGroup)
+ if err != nil {
+ return fmt.Errorf("error fetching runner group from github: %w", err)
+ }
+ updateParams.RunnerGroupID = runnerGroup.ID
+ hasUpdates = true
+ }
+
+ if old.DisableUpdate != newSet.DisableUpdate {
+ updateParams.RunnerSetting.DisableUpdate = newSet.DisableUpdate
+ hasUpdates = true
+ }
+
+ if hasUpdates {
+ _, err := scalesetCli.UpdateRunnerScaleSet(ctx, newSet.ScaleSetID, updateParams)
+ if err != nil {
+ return fmt.Errorf("failed to update scaleset in github: %w", err)
+ }
+ }
+ return nil
+ }
+
+ newScaleSet, err := r.store.UpdateEntityScaleSet(ctx, entity, scaleSetID, param, callback)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error updating pool: %w", err)
+ }
+ return newScaleSet, nil
+}
+
+func (r *Runner) CreateEntityScaleSet(ctx context.Context, entityType params.ForgeEntityType, entityID string, param params.CreateScaleSetParams) (scaleSetRet params.ScaleSet, err error) {
+ if !auth.IsAdmin(ctx) {
+ return params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+
+ if param.RunnerBootstrapTimeout == 0 {
+ param.RunnerBootstrapTimeout = appdefaults.DefaultRunnerBootstrapTimeout
+ }
+
+ if param.GitHubRunnerGroup == "" {
+ param.GitHubRunnerGroup = "Default"
+ }
+
+ entity, err := r.store.GetForgeEntity(ctx, entityType, entityID)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting entity: %w", err)
+ }
+
+ if entity.Credentials.ForgeType != params.GithubEndpointType {
+ return params.ScaleSet{}, runnerErrors.NewBadRequestError("scale sets are only supported for github entities")
+ }
+
+ ghCli, err := github.Client(ctx, entity)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating github client: %w", err)
+ }
+
+ scalesetCli, err := scalesets.NewClient(ghCli)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error getting scaleset client: %w", err)
+ }
+
+ runnerGroupID, err := ghCli.GetEntityRunnerGroupIDByName(ctx, param.GitHubRunnerGroup)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("failed to get github runner group for entity %s: %w", entity.ID, err)
+ }
+
+ createParam := ¶ms.RunnerScaleSet{
+ Name: param.Name,
+ RunnerGroupID: runnerGroupID,
+ Labels: []params.Label{
+ {
+ Name: param.Name,
+ Type: "System",
+ },
+ },
+ RunnerSetting: params.RunnerSetting{
+ Ephemeral: true,
+ DisableUpdate: param.DisableUpdate,
+ },
+ Enabled: ¶m.Enabled,
+ }
+
+ runnerScaleSet, err := scalesetCli.CreateRunnerScaleSet(ctx, createParam)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating runner scale set: %w", err)
+ }
+
+ defer func() {
+ if err != nil {
+ if innerErr := scalesetCli.DeleteRunnerScaleSet(ctx, runnerScaleSet.ID); innerErr != nil {
+ slog.With(slog.Any("error", innerErr)).ErrorContext(ctx, "failed to cleanup scale set")
+ }
+ }
+ }()
+ param.ScaleSetID = runnerScaleSet.ID
+
+ scaleSet, err := r.store.CreateEntityScaleSet(ctx, entity, param)
+ if err != nil {
+ return params.ScaleSet{}, fmt.Errorf("error creating scale set: %w", err)
+ }
+
+ return scaleSet, nil
+}
+
+func (r *Runner) ListScaleSetInstances(ctx context.Context, scalesetID uint) ([]params.Instance, error) {
+ if !auth.IsAdmin(ctx) {
+ return nil, runnerErrors.ErrUnauthorized
+ }
+
+ instances, err := r.store.ListScaleSetInstances(ctx, scalesetID)
+ if err != nil {
+ return []params.Instance{}, fmt.Errorf("error fetching instances: %w", err)
+ }
+ return instances, nil
+}
+
+func (r *Runner) ListEntityScaleSets(ctx context.Context, entityType params.ForgeEntityType, entityID string) ([]params.ScaleSet, error) {
+ if !auth.IsAdmin(ctx) {
+ return []params.ScaleSet{}, runnerErrors.ErrUnauthorized
+ }
+ entity := params.ForgeEntity{
+ ID: entityID,
+ EntityType: entityType,
+ }
+ scaleSets, err := r.store.ListEntityScaleSets(ctx, entity)
+ if err != nil {
+ return nil, fmt.Errorf("error fetching scale sets: %w", err)
+ }
+ return scaleSets, nil
+}
diff --git a/runner/types.go b/runner/types.go
index 3a081e09..1fb38bb7 100644
--- a/runner/types.go
+++ b/runner/types.go
@@ -14,7 +14,7 @@
package runner
-import "github.com/cloudbase/garm/params"
+import "github.com/cloudbase/garm-provider-common/params"
type HookTargetType string
diff --git a/scripts/build-static.sh b/scripts/build-static.sh
index debcd5e4..1f81752e 100755
--- a/scripts/build-static.sh
+++ b/scripts/build-static.sh
@@ -1,21 +1,67 @@
#!/bin/sh
GARM_SOURCE="/build/garm"
-BIN_DIR="$GARM_SOURCE/bin"
-git config --global --add safe.directory "$GARM_SOURCE"
+git config --global --add safe.directory /build/garm
+cd $GARM_SOURCE
-[ ! -d "$BIN_DIR" ] && mkdir -p "$BIN_DIR"
+CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+if [ ! -z "$GARM_REF" ] && [ "$GARM_REF" != "$CURRENT_BRANCH" ];then
+ git checkout $GARM_REF
+fi
+
+cd $GARM_SOURCE
+
+OUTPUT_DIR="/build/output"
+VERSION=$(git describe --tags --match='v[0-9]*' --dirty --always)
+BUILD_DIR="$OUTPUT_DIR/$VERSION"
+
+
+[ ! -d "$BUILD_DIR/linux" ] && mkdir -p "$BUILD_DIR/linux"
+[ ! -d "$BUILD_DIR/windows" ] && mkdir -p "$BUILD_DIR/windows"
export CGO_ENABLED=1
USER_ID=${USER_ID:-$UID}
USER_GROUP=${USER_GROUP:-$(id -g)}
+# Garm
cd $GARM_SOURCE/cmd/garm
-go build -mod vendor -o $BIN_DIR/garm -tags osusergo,netgo,sqlite_omit_load_extension -ldflags "-linkmode external -extldflags '-static' -s -w -X main.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-# GOOS=windows CC=x86_64-w64-mingw32-cc go build -mod vendor -o $BIN_DIR/garm.exe -tags osusergo,netgo,sqlite_omit_load_extension -ldflags "-s -w -X main.Version=$(git describe --always --dirty)" .
+# Linux
+GOOS=linux GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/linux/amd64/garm \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+GOOS=linux GOARCH=arm64 CC=aarch64-linux-musl-gcc go build \
+ -mod vendor \
+ -o $BUILD_DIR/linux/arm64/garm \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+# Windows
+GOOS=windows GOARCH=amd64 CC=x86_64-w64-mingw32-cc go build -mod vendor \
+ -o $BUILD_DIR/windows/amd64/garm.exe \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+# garm-cli
cd $GARM_SOURCE/cmd/garm-cli
-go build -mod vendor -o $BIN_DIR/garm-cli -tags osusergo,netgo -ldflags "-linkmode external -extldflags '-static' -s -w -X garm/cmd/garm-cli/cmd.Version=$(git describe --tags --match='v[0-9]*' --dirty --always)" .
-# GOOS=windows CGO_ENABLED=0 go build -mod vendor -o $BIN_DIR/garm-cli.exe -ldflags "-s -w -X garm/cmd/garm-cli/cmd.Version=$(git describe --always --dirty)" .
-chown $USER_ID:$USER_GROUP -R "$BIN_DIR"
+# Linux
+GOOS=linux GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/linux/amd64/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+GOOS=linux GOARCH=arm64 CC=aarch64-linux-musl-gcc go build -mod vendor \
+ -o $BUILD_DIR/linux/arm64/garm-cli \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-extldflags '-static' -s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+# Windows
+GOOS=windows GOARCH=amd64 go build -mod vendor \
+ -o $BUILD_DIR/windows/amd64/garm-cli.exe \
+ -tags osusergo,netgo,sqlite_omit_load_extension \
+ -ldflags "-s -w -X github.com/cloudbase/garm/util/appdefaults.Version=$VERSION" .
+
+
+git checkout $CURRENT_BRANCH || true
+chown $USER_ID:$USER_GROUP -R "$OUTPUT_DIR"
diff --git a/scripts/make-release.sh b/scripts/make-release.sh
new file mode 100755
index 00000000..fc9c6b04
--- /dev/null
+++ b/scripts/make-release.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+echo $GARM_REF
+
+VERSION=$(git describe --tags --match='v[0-9]*' --dirty --always)
+RELEASE="$PWD/release"
+
+[ ! -d "$RELEASE" ] && mkdir -p "$RELEASE"
+
+if [ ! -z "$GARM_REF" ]; then
+ VERSION=$(git describe --tags --match='v[0-9]*' --always $GARM_REF)
+fi
+
+echo $VERSION
+
+if [ ! -d "build/$VERSION" ]; then
+ echo "missing build/$VERSION"
+ exit 1
+fi
+
+# Windows
+
+if [ ! -d "build/$VERSION/windows/amd64" ];then
+ echo "missing build/$VERSION/windows/amd64"
+ exit 1
+fi
+
+WINDOWS_FILES=("garm.exe" "garm-cli.exe")
+
+for file in ${WINDOWS_FILES[@]};do
+ if [ ! -f "build/$VERSION/windows/amd64/$file" ];then
+ echo "missing build/$VERSION/windows/amd64/$file"
+ exit 1
+ fi
+
+ pushd build/$VERSION/windows/amd64
+ zip ${file%%.exe}-windows-amd64.zip $file
+ sha256sum ${file%%.exe}-windows-amd64.zip > ${file%%.exe}-windows-amd64.zip.sha256
+ mv ${file%%.exe}-windows-amd64.zip $RELEASE
+ mv ${file%%.exe}-windows-amd64.zip.sha256 $RELEASE
+ popd
+done
+
+# Linux
+OS_ARCHES=("amd64" "arm64")
+FILES=("garm" "garm-cli")
+
+for arch in ${OS_ARCHES[@]};do
+ for file in ${FILES[@]};do
+ if [ ! -f "build/$VERSION/linux/$arch/$file" ];then
+ echo "missing build/$VERSION/linux/$arch/$file"
+ exit 1
+ fi
+
+ pushd build/$VERSION/linux/$arch
+ tar czf ${file}-linux-$arch.tgz $file
+ sha256sum ${file}-linux-$arch.tgz > ${file}-linux-$arch.tgz.sha256
+ mv ${file}-linux-$arch.tgz $RELEASE
+ mv ${file}-linux-$arch.tgz.sha256 $RELEASE
+ popd
+ done
+done
diff --git a/test/integration/client_utils.go b/test/integration/client_utils.go
new file mode 100644
index 00000000..e423c107
--- /dev/null
+++ b/test/integration/client_utils.go
@@ -0,0 +1,512 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "github.com/go-openapi/runtime"
+
+ "github.com/cloudbase/garm/client"
+ clientControllerInfo "github.com/cloudbase/garm/client/controller_info"
+ clientCredentials "github.com/cloudbase/garm/client/credentials"
+ clientEndpoints "github.com/cloudbase/garm/client/endpoints"
+ clientFirstRun "github.com/cloudbase/garm/client/first_run"
+ clientInstances "github.com/cloudbase/garm/client/instances"
+ clientJobs "github.com/cloudbase/garm/client/jobs"
+ clientLogin "github.com/cloudbase/garm/client/login"
+ clientMetricsToken "github.com/cloudbase/garm/client/metrics_token"
+ clientOrganizations "github.com/cloudbase/garm/client/organizations"
+ clientPools "github.com/cloudbase/garm/client/pools"
+ clientProviders "github.com/cloudbase/garm/client/providers"
+ clientRepositories "github.com/cloudbase/garm/client/repositories"
+ "github.com/cloudbase/garm/params"
+)
+
+// firstRun will initialize a new garm installation.
+func firstRun(apiCli *client.GarmAPI, newUser params.NewUserParams) (params.User, error) {
+ firstRunResponse, err := apiCli.FirstRun.FirstRun(
+ clientFirstRun.NewFirstRunParams().WithBody(newUser),
+ nil)
+ if err != nil {
+ return params.User{}, err
+ }
+ return firstRunResponse.Payload, nil
+}
+
+func login(apiCli *client.GarmAPI, params params.PasswordLoginParams) (string, error) {
+ loginResponse, err := apiCli.Login.Login(
+ clientLogin.NewLoginParams().WithBody(params),
+ nil)
+ if err != nil {
+ return "", err
+ }
+ return loginResponse.Payload.Token, nil
+}
+
+// listCredentials lists all the credentials configured in GARM.
+func listCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Credentials, error) {
+ listCredentialsResponse, err := apiCli.Credentials.ListCredentials(
+ clientCredentials.NewListCredentialsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listCredentialsResponse.Payload, nil
+}
+
+func createGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsParams params.CreateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+ createCredentialsResponse, err := apiCli.Credentials.CreateCredentials(
+ clientCredentials.NewCreateCredentialsParams().WithBody(credentialsParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createCredentialsResponse.Payload, nil
+}
+
+func deleteGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsID int64) error {
+ return apiCli.Credentials.DeleteCredentials(
+ clientCredentials.NewDeleteCredentialsParams().WithID(credentialsID),
+ apiAuthToken)
+}
+
+func updateGithubCredentials(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, credentialsID int64, credentialsParams params.UpdateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+ updateCredentialsResponse, err := apiCli.Credentials.UpdateCredentials(
+ clientCredentials.NewUpdateCredentialsParams().WithID(credentialsID).WithBody(credentialsParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateCredentialsResponse.Payload, nil
+}
+
+func createGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointParams params.CreateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+ createEndpointResponse, err := apiCli.Endpoints.CreateGithubEndpoint(
+ clientEndpoints.NewCreateGithubEndpointParams().WithBody(endpointParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createEndpointResponse.Payload, nil
+}
+
+func listGithubEndpoints(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.ForgeEndpoints, error) {
+ listEndpointsResponse, err := apiCli.Endpoints.ListGithubEndpoints(
+ clientEndpoints.NewListGithubEndpointsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listEndpointsResponse.Payload, nil
+}
+
+func getGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string) (*params.ForgeEndpoint, error) {
+ getEndpointResponse, err := apiCli.Endpoints.GetGithubEndpoint(
+ clientEndpoints.NewGetGithubEndpointParams().WithName(endpointName),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getEndpointResponse.Payload, nil
+}
+
+func deleteGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string) error {
+ return apiCli.Endpoints.DeleteGithubEndpoint(
+ clientEndpoints.NewDeleteGithubEndpointParams().WithName(endpointName),
+ apiAuthToken)
+}
+
+func updateGithubEndpoint(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, endpointName string, endpointParams params.UpdateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+ updateEndpointResponse, err := apiCli.Endpoints.UpdateGithubEndpoint(
+ clientEndpoints.NewUpdateGithubEndpointParams().WithName(endpointName).WithBody(endpointParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateEndpointResponse.Payload, nil
+}
+
+// listProviders lists all the providers configured in GARM.
+func listProviders(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Providers, error) {
+ listProvidersResponse, err := apiCli.Providers.ListProviders(
+ clientProviders.NewListProvidersParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listProvidersResponse.Payload, nil
+}
+
+// getControllerInfo returns information about the GARM controller.
+func getControllerInfo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.ControllerInfo, error) {
+ controllerInfoResponse, err := apiCli.ControllerInfo.ControllerInfo(
+ clientControllerInfo.NewControllerInfoParams(),
+ apiAuthToken)
+ if err != nil {
+ return params.ControllerInfo{}, err
+ }
+ return controllerInfoResponse.Payload, nil
+}
+
+// listJobs lists all the jobs configured in GARM.
+func listJobs(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Jobs, error) {
+ listJobsResponse, err := apiCli.Jobs.ListJobs(
+ clientJobs.NewListJobsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listJobsResponse.Payload, nil
+}
+
+// getMetricsToken returns the metrics token.
+func getMetricsToken(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (string, error) {
+ getMetricsTokenResponse, err := apiCli.MetricsToken.GetMetricsToken(
+ clientMetricsToken.NewGetMetricsTokenParams(),
+ apiAuthToken)
+ if err != nil {
+ return "", err
+ }
+ return getMetricsTokenResponse.Payload.Token, nil
+}
+
+// ///////////////
+// Repositories //
+// ///////////////
+func createRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoParams params.CreateRepoParams) (*params.Repository, error) {
+ createRepoResponse, err := apiCli.Repositories.CreateRepo(
+ clientRepositories.NewCreateRepoParams().WithBody(repoParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createRepoResponse.Payload, nil
+}
+
+func listRepos(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Repositories, error) {
+ listReposResponse, err := apiCli.Repositories.ListRepos(
+ clientRepositories.NewListReposParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listReposResponse.Payload, nil
+}
+
+func updateRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, repoParams params.UpdateEntityParams) (*params.Repository, error) {
+ updateRepoResponse, err := apiCli.Repositories.UpdateRepo(
+ clientRepositories.NewUpdateRepoParams().WithRepoID(repoID).WithBody(repoParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateRepoResponse.Payload, nil
+}
+
+func getRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (*params.Repository, error) {
+ getRepoResponse, err := apiCli.Repositories.GetRepo(
+ clientRepositories.NewGetRepoParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoResponse.Payload, nil
+}
+
+func installRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, webhookParams params.InstallWebhookParams) (*params.HookInfo, error) {
+ installRepoWebhookResponse, err := apiCli.Repositories.InstallRepoWebhook(
+ clientRepositories.NewInstallRepoWebhookParams().WithRepoID(repoID).WithBody(webhookParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &installRepoWebhookResponse.Payload, nil
+}
+
+func getRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (*params.HookInfo, error) {
+ getRepoWebhookResponse, err := apiCli.Repositories.GetRepoWebhookInfo(
+ clientRepositories.NewGetRepoWebhookInfoParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoWebhookResponse.Payload, nil
+}
+
+func uninstallRepoWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) error {
+ return apiCli.Repositories.UninstallRepoWebhook(
+ clientRepositories.NewUninstallRepoWebhookParams().WithRepoID(repoID),
+ apiAuthToken)
+}
+
+func createRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string, poolParams params.CreatePoolParams) (*params.Pool, error) {
+ createRepoPoolResponse, err := apiCli.Repositories.CreateRepoPool(
+ clientRepositories.NewCreateRepoPoolParams().WithRepoID(repoID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createRepoPoolResponse.Payload, nil
+}
+
+func listRepoPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (params.Pools, error) {
+ listRepoPoolsResponse, err := apiCli.Repositories.ListRepoPools(
+ clientRepositories.NewListRepoPoolsParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listRepoPoolsResponse.Payload, nil
+}
+
+func getRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string) (*params.Pool, error) {
+ getRepoPoolResponse, err := apiCli.Repositories.GetRepoPool(
+ clientRepositories.NewGetRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getRepoPoolResponse.Payload, nil
+}
+
+func updateRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updateRepoPoolResponse, err := apiCli.Repositories.UpdateRepoPool(
+ clientRepositories.NewUpdateRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateRepoPoolResponse.Payload, nil
+}
+
+func listRepoInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) (params.Instances, error) {
+ listRepoInstancesResponse, err := apiCli.Repositories.ListRepoInstances(
+ clientRepositories.NewListRepoInstancesParams().WithRepoID(repoID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listRepoInstancesResponse.Payload, nil
+}
+
+func deleteRepo(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID string) error {
+ return apiCli.Repositories.DeleteRepo(
+ clientRepositories.NewDeleteRepoParams().WithRepoID(repoID),
+ apiAuthToken)
+}
+
+func deleteRepoPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, repoID, poolID string) error {
+ return apiCli.Repositories.DeleteRepoPool(
+ clientRepositories.NewDeleteRepoPoolParams().WithRepoID(repoID).WithPoolID(poolID),
+ apiAuthToken)
+}
+
+// ////////////////
+// Organizations //
+// ////////////////
+func createOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgParams params.CreateOrgParams) (*params.Organization, error) {
+ createOrgResponse, err := apiCli.Organizations.CreateOrg(
+ clientOrganizations.NewCreateOrgParams().WithBody(orgParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createOrgResponse.Payload, nil
+}
+
+func listOrgs(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Organizations, error) {
+ listOrgsResponse, err := apiCli.Organizations.ListOrgs(
+ clientOrganizations.NewListOrgsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgsResponse.Payload, nil
+}
+
+func updateOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, orgParams params.UpdateEntityParams) (*params.Organization, error) {
+ updateOrgResponse, err := apiCli.Organizations.UpdateOrg(
+ clientOrganizations.NewUpdateOrgParams().WithOrgID(orgID).WithBody(orgParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateOrgResponse.Payload, nil
+}
+
+func getOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (*params.Organization, error) {
+ getOrgResponse, err := apiCli.Organizations.GetOrg(
+ clientOrganizations.NewGetOrgParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgResponse.Payload, nil
+}
+
+func installOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, webhookParams params.InstallWebhookParams) (*params.HookInfo, error) {
+ installOrgWebhookResponse, err := apiCli.Organizations.InstallOrgWebhook(
+ clientOrganizations.NewInstallOrgWebhookParams().WithOrgID(orgID).WithBody(webhookParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &installOrgWebhookResponse.Payload, nil
+}
+
+func getOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (*params.HookInfo, error) {
+ getOrgWebhookResponse, err := apiCli.Organizations.GetOrgWebhookInfo(
+ clientOrganizations.NewGetOrgWebhookInfoParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgWebhookResponse.Payload, nil
+}
+
+func uninstallOrgWebhook(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) error {
+ return apiCli.Organizations.UninstallOrgWebhook(
+ clientOrganizations.NewUninstallOrgWebhookParams().WithOrgID(orgID),
+ apiAuthToken)
+}
+
+func createOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string, poolParams params.CreatePoolParams) (*params.Pool, error) {
+ createOrgPoolResponse, err := apiCli.Organizations.CreateOrgPool(
+ clientOrganizations.NewCreateOrgPoolParams().WithOrgID(orgID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &createOrgPoolResponse.Payload, nil
+}
+
+func listOrgPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (params.Pools, error) {
+ listOrgPoolsResponse, err := apiCli.Organizations.ListOrgPools(
+ clientOrganizations.NewListOrgPoolsParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgPoolsResponse.Payload, nil
+}
+
+func getOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string) (*params.Pool, error) {
+ getOrgPoolResponse, err := apiCli.Organizations.GetOrgPool(
+ clientOrganizations.NewGetOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getOrgPoolResponse.Payload, nil
+}
+
+func updateOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updateOrgPoolResponse, err := apiCli.Organizations.UpdateOrgPool(
+ clientOrganizations.NewUpdateOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updateOrgPoolResponse.Payload, nil
+}
+
+func listOrgInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) (params.Instances, error) {
+ listOrgInstancesResponse, err := apiCli.Organizations.ListOrgInstances(
+ clientOrganizations.NewListOrgInstancesParams().WithOrgID(orgID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listOrgInstancesResponse.Payload, nil
+}
+
+func deleteOrg(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID string) error {
+ return apiCli.Organizations.DeleteOrg(
+ clientOrganizations.NewDeleteOrgParams().WithOrgID(orgID),
+ apiAuthToken)
+}
+
+func deleteOrgPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, orgID, poolID string) error {
+ return apiCli.Organizations.DeleteOrgPool(
+ clientOrganizations.NewDeleteOrgPoolParams().WithOrgID(orgID).WithPoolID(poolID),
+ apiAuthToken)
+}
+
+// ////////////
+// Instances //
+// ////////////
+func listInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Instances, error) {
+ listInstancesResponse, err := apiCli.Instances.ListInstances(
+ clientInstances.NewListInstancesParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listInstancesResponse.Payload, nil
+}
+
+func getInstance(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, instanceID string) (*params.Instance, error) {
+ getInstancesResponse, err := apiCli.Instances.GetInstance(
+ clientInstances.NewGetInstanceParams().WithInstanceName(instanceID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getInstancesResponse.Payload, nil
+}
+
+func deleteInstance(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, instanceID string, forceRemove, bypassGHUnauthorized bool) error {
+ return apiCli.Instances.DeleteInstance(
+ clientInstances.NewDeleteInstanceParams().WithInstanceName(instanceID).WithForceRemove(&forceRemove).WithBypassGHUnauthorized(&bypassGHUnauthorized),
+ apiAuthToken)
+}
+
+// ////////
+// Pools //
+// ////////
+func listPools(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter) (params.Pools, error) {
+ listPoolsResponse, err := apiCli.Pools.ListPools(
+ clientPools.NewListPoolsParams(),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listPoolsResponse.Payload, nil
+}
+
+func getPool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) (*params.Pool, error) {
+ getPoolResponse, err := apiCli.Pools.GetPool(
+ clientPools.NewGetPoolParams().WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &getPoolResponse.Payload, nil
+}
+
+func updatePool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string, poolParams params.UpdatePoolParams) (*params.Pool, error) {
+ updatePoolResponse, err := apiCli.Pools.UpdatePool(
+ clientPools.NewUpdatePoolParams().WithPoolID(poolID).WithBody(poolParams),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return &updatePoolResponse.Payload, nil
+}
+
+func deletePool(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) error {
+ return apiCli.Pools.DeletePool(
+ clientPools.NewDeletePoolParams().WithPoolID(poolID),
+ apiAuthToken)
+}
diff --git a/test/integration/config/config.toml b/test/integration/config/config.toml
new file mode 100644
index 00000000..62c2d9d7
--- /dev/null
+++ b/test/integration/config/config.toml
@@ -0,0 +1,40 @@
+[default]
+callback_url = "${GARM_BASE_URL}/api/v1/callbacks"
+metadata_url = "${GARM_BASE_URL}/api/v1/metadata"
+webhook_url = "${GARM_BASE_URL}/webhooks"
+enable_webhook_management = true
+
+[metrics]
+enable = true
+disable_auth = false
+
+[jwt_auth]
+secret = "${JWT_AUTH_SECRET}"
+time_to_live = "8760h"
+
+[apiserver]
+bind = "0.0.0.0"
+port = ${GARM_PORT}
+use_tls = false
+
+[database]
+backend = "sqlite3"
+passphrase = "${DB_PASSPHRASE}"
+[database.sqlite3]
+ db_file = "${GARM_CONFIG_DIR}/garm.db"
+
+[[provider]]
+name = "lxd_local"
+provider_type = "external"
+description = "Local LXD installation"
+ [provider.external]
+ provider_executable = "${LXD_PROVIDER_EXECUTABLE}"
+ config_file = "${LXD_PROVIDER_CONFIG}"
+
+[[provider]]
+name = "test_external"
+description = "external test provider"
+provider_type = "external"
+ [provider.external]
+ config_file = "${GARM_CONFIG_DIR}/test-provider/config"
+ provider_executable = "${GARM_CONFIG_DIR}/test-provider/garm-external-provider"
\ No newline at end of file
diff --git a/test/integration/config/garm-provider-lxd.toml b/test/integration/config/garm-provider-lxd.toml
new file mode 100644
index 00000000..0b2ba3f0
--- /dev/null
+++ b/test/integration/config/garm-provider-lxd.toml
@@ -0,0 +1,21 @@
+unix_socket_path = "/var/snap/lxd/common/lxd/unix.socket"
+include_default_profile = false
+instance_type = "container"
+secure_boot = false
+project_name = "default"
+[image_remotes]
+ [image_remotes.ubuntu]
+ addr = "${LXD_REMOTE_SERVER}"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
+ [image_remotes.ubuntu_daily]
+ addr = "https://cloud-images.ubuntu.com/daily"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
+ [image_remotes.images]
+ addr = "https://images.linuxcontainers.org"
+ public = true
+ protocol = "simplestreams"
+ skip_verify = false
\ No newline at end of file
diff --git a/test/integration/config/garm.service b/test/integration/config/garm.service
new file mode 100644
index 00000000..9015947e
--- /dev/null
+++ b/test/integration/config/garm.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=GitHub Actions Runner Manager (garm)
+After=multi-user.target
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/garm -config ${GARM_CONFIG_FILE}
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=default.target
diff --git a/test/integration/credentials_test.go b/test/integration/credentials_test.go
new file mode 100644
index 00000000..9b9387f6
--- /dev/null
+++ b/test/integration/credentials_test.go
@@ -0,0 +1,246 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "github.com/cloudbase/garm/params"
+)
+
+const (
+ defaultEndpointName string = "github.com"
+ dummyCredentialsName string = "dummy"
+)
+
+func (suite *GarmSuite) TestGithubCredentialsErrorOnDuplicateCredentialsName() {
+ t := suite.T()
+ t.Log("Testing error on duplicate credentials name")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with duplicate name")
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsToDeleteWhenInUse() {
+ t := suite.T()
+ t.Log("Testing error when deleting credentials in use")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+
+ orgName := "dummy-owner"
+ repoName := "dummy-repo"
+ createParams := params.CreateRepoParams{
+ Owner: orgName,
+ Name: repoName,
+ CredentialsName: creds.Name,
+ WebhookSecret: "superSecret@123BlaBla",
+ }
+
+ t.Logf("Create repository with owner_name: %s, repo_name: %s", orgName, repoName)
+ repo, err := createRepo(suite.cli, suite.authToken, createParams)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ deleteRepo(suite.cli, suite.authToken, repo.ID)
+ deleteGithubCredentials(suite.cli, suite.authToken, int64(creds.ID)) //nolint:gosec
+ })
+
+ err = deleteGithubCredentials(suite.cli, suite.authToken, int64(creds.ID)) //nolint:gosec
+ suite.Error(err, "expected error when deleting credentials in use")
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsOnInvalidAuthType() {
+ t := suite.T()
+ t.Log("Testing error on invalid auth type")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthType("invalid"),
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid auth type")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsWhenAuthTypeParamsAreIncorrect() {
+ t := suite.T()
+ t.Log("Testing error when auth type params are incorrect")
+ privateKeyBytes, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err)
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ App: params.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: privateKeyBytes,
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid auth type params")
+
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsWhenAuthTypeParamsAreMissing() {
+ t := suite.T()
+ t.Log("Testing error when auth type params are missing")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypeApp,
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with missing auth type params")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsUpdateFailsWhenBothPATAndAppAreSupplied() {
+ t := suite.T()
+ t.Log("Testing error when both PAT and App are supplied")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ privateKeyBytes, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err)
+ updateCredsParams := params.UpdateGithubCredentialsParams{
+ PAT: ¶ms.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ App: ¶ms.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: privateKeyBytes,
+ },
+ }
+ _, err = updateGithubCredentials(suite.cli, suite.authToken, int64(creds.ID), updateCredsParams) //nolint:gosec
+ suite.Error(err, "expected error when updating credentials with both PAT and App")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailWhenAppKeyIsInvalid() {
+ t := suite.T()
+ t.Log("Testing error when app key is invalid")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypeApp,
+ App: params.GithubApp{
+ AppID: 123,
+ InstallationID: 456,
+ PrivateKeyBytes: []byte("invalid"),
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid app key")
+ expectAPIStatusCode(err, 400)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailWhenEndpointDoesntExist() {
+ t := suite.T()
+ t.Log("Testing error when endpoint doesn't exist")
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: "iDontExist.example.com",
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err := createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with invalid endpoint")
+ expectAPIStatusCode(err, 404)
+}
+
+func (suite *GarmSuite) TestGithubCredentialsFailsOnDuplicateName() {
+ t := suite.T()
+ t.Log("Testing error on duplicate credentials name")
+ creds, err := suite.createDummyCredentials(dummyCredentialsName, defaultEndpointName)
+ suite.NoError(err)
+ t.Cleanup(func() {
+ suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ })
+
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: dummyCredentialsName,
+ Endpoint: defaultEndpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ _, err = createGithubCredentials(suite.cli, suite.authToken, createCredsParams)
+ suite.Error(err, "expected error when creating credentials with duplicate name")
+ expectAPIStatusCode(err, 409)
+}
+
+func (suite *GarmSuite) createDummyCredentials(name, endpointName string) (*params.ForgeCredentials, error) {
+ createCredsParams := params.CreateGithubCredentialsParams{
+ Name: name,
+ Endpoint: endpointName,
+ Description: "GARM test credentials",
+ AuthType: params.ForgeAuthTypePAT,
+ PAT: params.GithubPAT{
+ OAuth2Token: "dummy",
+ },
+ }
+ return suite.CreateGithubCredentials(createCredsParams)
+}
+
+func (suite *GarmSuite) CreateGithubCredentials(credentialsParams params.CreateGithubCredentialsParams) (*params.ForgeCredentials, error) {
+ t := suite.T()
+ t.Log("Create GitHub credentials")
+ credentials, err := createGithubCredentials(suite.cli, suite.authToken, credentialsParams)
+ if err != nil {
+ return nil, err
+ }
+
+ return credentials, nil
+}
+
+func (suite *GarmSuite) DeleteGithubCredential(id int64) error {
+ t := suite.T()
+ t.Log("Delete GitHub credential")
+ if err := deleteGithubCredentials(suite.cli, suite.authToken, id); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/test/integration/endpoints.go b/test/integration/endpoints.go
new file mode 100644
index 00000000..720f43d2
--- /dev/null
+++ b/test/integration/endpoints.go
@@ -0,0 +1,62 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/cloudbase/garm/params"
+)
+
+func checkEndpointParamsAreEqual(a, b params.ForgeEndpoint) error {
+ if a.Name != b.Name {
+ return fmt.Errorf("endpoint name mismatch")
+ }
+
+ if a.Description != b.Description {
+ return fmt.Errorf("endpoint description mismatch")
+ }
+
+ if a.BaseURL != b.BaseURL {
+ return fmt.Errorf("endpoint base URL mismatch")
+ }
+
+ if a.APIBaseURL != b.APIBaseURL {
+ return fmt.Errorf("endpoint API base URL mismatch")
+ }
+
+ if a.UploadBaseURL != b.UploadBaseURL {
+ return fmt.Errorf("endpoint upload base URL mismatch")
+ }
+
+ if string(a.CACertBundle) != string(b.CACertBundle) {
+ return fmt.Errorf("endpoint CA cert bundle mismatch")
+ }
+ return nil
+}
+
+func getTestFileContents(relPath string) ([]byte, error) {
+ baseDir := os.Getenv("GARM_CHECKOUT_DIR")
+ if baseDir == "" {
+		return nil, fmt.Errorf("env variable GARM_CHECKOUT_DIR not set")
+ }
+ contents, err := os.ReadFile(filepath.Join(baseDir, "testdata", relPath))
+ if err != nil {
+ return nil, err
+ }
+ return contents, nil
+}
diff --git a/test/integration/endpoints_test.go b/test/integration/endpoints_test.go
new file mode 100644
index 00000000..fe0dd160
--- /dev/null
+++ b/test/integration/endpoints_test.go
@@ -0,0 +1,226 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestGithubEndpointOperations() {
+ t := suite.T()
+ t.Log("Testing endpoint operations")
+ suite.MustDefaultGithubEndpoint()
+
+ caBundle, err := getTestFileContents("certs/srv-pub.pem")
+ suite.NoError(err)
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "test-endpoint",
+ Description: "Test endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ CACertBundle: caBundle,
+ }
+
+ endpoint, err := suite.CreateGithubEndpoint(endpointParams)
+ suite.NoError(err)
+ suite.Equal(endpoint.Name, endpointParams.Name, "Endpoint name mismatch")
+ suite.Equal(endpoint.Description, endpointParams.Description, "Endpoint description mismatch")
+ suite.Equal(endpoint.BaseURL, endpointParams.BaseURL, "Endpoint base URL mismatch")
+ suite.Equal(endpoint.APIBaseURL, endpointParams.APIBaseURL, "Endpoint API base URL mismatch")
+ suite.Equal(endpoint.UploadBaseURL, endpointParams.UploadBaseURL, "Endpoint upload base URL mismatch")
+ suite.Equal(string(endpoint.CACertBundle), string(caBundle), "Endpoint CA cert bundle mismatch")
+
+ endpoint2 := suite.GetGithubEndpoint(endpointParams.Name)
+ suite.NotNil(endpoint, "endpoint is nil")
+ suite.NotNil(endpoint2, "endpoint2 is nil")
+
+ err = checkEndpointParamsAreEqual(*endpoint, *endpoint2)
+ suite.NoError(err, "endpoint params are not equal")
+ endpoints := suite.ListGithubEndpoints()
+ suite.NoError(err, "error listing github endpoints")
+ var found bool
+ for _, ep := range endpoints {
+ if ep.Name == endpointParams.Name {
+ checkEndpointParamsAreEqual(*endpoint, ep)
+ found = true
+ break
+ }
+ }
+ suite.Equal(found, true, "endpoint not found in list")
+
+ err = suite.DeleteGithubEndpoint(endpoint.Name)
+ suite.NoError(err, "error deleting github endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointMustFailToDeleteDefaultGithubEndpoint() {
+ t := suite.T()
+ t.Log("Testing error when deleting default github.com endpoint")
+ err := deleteGithubEndpoint(suite.cli, suite.authToken, "github.com")
+ suite.Error(err, "expected error when attempting to delete the default github.com endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointFailsOnInvalidCABundle() {
+ t := suite.T()
+ t.Log("Testing endpoint creation with invalid CA cert bundle")
+ badCABundle, err := getTestFileContents("certs/srv-key.pem")
+ suite.NoError(err, "error reading CA cert bundle")
+
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "dummy",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ CACertBundle: badCABundle,
+ }
+
+ _, err = createGithubEndpoint(suite.cli, suite.authToken, endpointParams)
+ suite.Error(err, "expected error when creating endpoint with invalid CA cert bundle")
+}
+
+func (suite *GarmSuite) TestGithubEndpointDeletionFailsWhenCredentialsExist() {
+ t := suite.T()
+ t.Log("Testing endpoint deletion when credentials exist")
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "dummy",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ endpoint, err := suite.CreateGithubEndpoint(endpointParams)
+ suite.NoError(err, "error creating github endpoint")
+ creds, err := suite.createDummyCredentials("test-creds", endpoint.Name)
+ suite.NoError(err, "error creating dummy credentials")
+
+ err = deleteGithubEndpoint(suite.cli, suite.authToken, endpoint.Name)
+ suite.Error(err, "expected error when deleting endpoint with credentials")
+
+ err = suite.DeleteGithubCredential(int64(creds.ID)) //nolint:gosec
+ suite.NoError(err, "error deleting credentials")
+ err = suite.DeleteGithubEndpoint(endpoint.Name)
+ suite.NoError(err, "error deleting endpoint")
+}
+
+func (suite *GarmSuite) TestGithubEndpointFailsOnDuplicateName() {
+ t := suite.T()
+ t.Log("Testing endpoint creation with duplicate name")
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: "github.com",
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ _, err := createGithubEndpoint(suite.cli, suite.authToken, endpointParams)
+ suite.Error(err, "expected error when creating endpoint with duplicate name")
+}
+
+func (suite *GarmSuite) TestGithubEndpointUpdateEndpoint() {
+ t := suite.T()
+ t.Log("Testing endpoint update")
+ endpoint, err := suite.createDummyEndpoint("dummy")
+ suite.NoError(err, "error creating dummy endpoint")
+ t.Cleanup(func() {
+ suite.DeleteGithubEndpoint(endpoint.Name)
+ })
+
+ newDescription := "Updated description"
+ newBaseURL := "https://ghes2.example.com"
+ newAPIBaseURL := "https://api.ghes2.example.com/"
+ newUploadBaseURL := "https://uploads.ghes2.example.com/"
+ newCABundle, err := getTestFileContents("certs/srv-pub.pem")
+ suite.NoError(err, "error reading CA cert bundle")
+
+ updateParams := params.UpdateGithubEndpointParams{
+ Description: &newDescription,
+ BaseURL: &newBaseURL,
+ APIBaseURL: &newAPIBaseURL,
+ UploadBaseURL: &newUploadBaseURL,
+ CACertBundle: newCABundle,
+ }
+
+ updated, err := updateGithubEndpoint(suite.cli, suite.authToken, endpoint.Name, updateParams)
+ suite.NoError(err, "error updating github endpoint")
+
+ suite.Equal(updated.Name, endpoint.Name, "Endpoint name mismatch")
+ suite.Equal(updated.Description, newDescription, "Endpoint description mismatch")
+ suite.Equal(updated.BaseURL, newBaseURL, "Endpoint base URL mismatch")
+ suite.Equal(updated.APIBaseURL, newAPIBaseURL, "Endpoint API base URL mismatch")
+ suite.Equal(updated.UploadBaseURL, newUploadBaseURL, "Endpoint upload base URL mismatch")
+ suite.Equal(string(updated.CACertBundle), string(newCABundle), "Endpoint CA cert bundle mismatch")
+}
+
+func (suite *GarmSuite) MustDefaultGithubEndpoint() {
+ ep := suite.GetGithubEndpoint("github.com")
+
+ suite.NotNil(ep, "default GitHub endpoint not found")
+ suite.Equal(ep.Name, "github.com", "default GitHub endpoint name mismatch")
+}
+
+func (suite *GarmSuite) GetGithubEndpoint(name string) *params.ForgeEndpoint {
+ t := suite.T()
+ t.Log("Get GitHub endpoint")
+ endpoint, err := getGithubEndpoint(suite.cli, suite.authToken, name)
+ suite.NoError(err, "error getting GitHub endpoint")
+
+ return endpoint
+}
+
+func (suite *GarmSuite) CreateGithubEndpoint(params params.CreateGithubEndpointParams) (*params.ForgeEndpoint, error) {
+ t := suite.T()
+ t.Log("Create GitHub endpoint")
+ endpoint, err := createGithubEndpoint(suite.cli, suite.authToken, params)
+ suite.NoError(err, "error creating GitHub endpoint")
+
+ return endpoint, nil
+}
+
+func (suite *GarmSuite) DeleteGithubEndpoint(name string) error {
+ t := suite.T()
+ t.Log("Delete GitHub endpoint")
+ err := deleteGithubEndpoint(suite.cli, suite.authToken, name)
+ suite.NoError(err, "error deleting GitHub endpoint")
+
+ return nil
+}
+
+func (suite *GarmSuite) ListGithubEndpoints() params.ForgeEndpoints {
+ t := suite.T()
+ t.Log("List GitHub endpoints")
+ endpoints, err := listGithubEndpoints(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing GitHub endpoints")
+
+ return endpoints
+}
+
+func (suite *GarmSuite) createDummyEndpoint(name string) (*params.ForgeEndpoint, error) {
+ endpointParams := params.CreateGithubEndpointParams{
+ Name: name,
+ Description: "Dummy endpoint",
+ BaseURL: "https://ghes.example.com",
+ APIBaseURL: "https://api.ghes.example.com/",
+ UploadBaseURL: "https://uploads.ghes.example.com/",
+ }
+
+ return suite.CreateGithubEndpoint(endpointParams)
+}
diff --git a/test/integration/external_provider_test.go b/test/integration/external_provider_test.go
new file mode 100644
index 00000000..2c85eb35
--- /dev/null
+++ b/test/integration/external_provider_test.go
@@ -0,0 +1,184 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-openapi/runtime"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/client"
+ clientInstances "github.com/cloudbase/garm/client/instances"
+ "github.com/cloudbase/garm/params"
+)
+
+func (suite *GarmSuite) TestExternalProvider() {
+ t := suite.T()
+ t.Log("Testing external provider")
+ repoPoolParams2 := params.CreatePoolParams{
+ MaxRunners: 2,
+ MinIdleRunners: 0,
+ Flavor: "default",
+ Image: "ubuntu:24.04",
+ OSType: commonParams.Linux,
+ OSArch: commonParams.Amd64,
+ ProviderName: "test_external",
+ Tags: []string{"repo-runner-2"},
+ Enabled: true,
+ }
+ repoPool2 := suite.CreateRepoPool(suite.repo.ID, repoPoolParams2)
+ newParams := suite.UpdateRepoPool(suite.repo.ID, repoPool2.ID, repoPoolParams2.MaxRunners, 1)
+ t.Logf("Updated repo pool with pool_id %s with new_params %+v", repoPool2.ID, newParams)
+
+ err := suite.WaitPoolInstances(repoPool2.ID, commonParams.InstanceRunning, params.RunnerPending, 1*time.Minute)
+ suite.NoError(err, "error waiting for pool instances to be running")
+ repoPool2 = suite.GetRepoPool(suite.repo.ID, repoPool2.ID)
+ suite.DisableRepoPool(suite.repo.ID, repoPool2.ID)
+ suite.DeleteInstance(repoPool2.Instances[0].Name, false, false)
+ err = suite.WaitPoolInstances(repoPool2.ID, commonParams.InstancePendingDelete, params.RunnerPending, 1*time.Minute)
+ suite.NoError(err, "error waiting for pool instances to be pending delete")
+ suite.DeleteInstance(repoPool2.Instances[0].Name, true, false) // delete instance with forceRemove
+ err = suite.WaitInstanceToBeRemoved(repoPool2.Instances[0].Name, 1*time.Minute)
+ suite.NoError(err, "error waiting for instance to be removed")
+ suite.DeleteRepoPool(suite.repo.ID, repoPool2.ID)
+}
+
+func (suite *GarmSuite) WaitPoolInstances(poolID string, status commonParams.InstanceStatus, runnerStatus params.RunnerStatus, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+
+ pool, err := getPool(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+
+ t.Logf("Waiting for pool instances with pool_id %s to reach desired status %v and desired_runner_status %v", poolID, status, runnerStatus)
+ for timeWaited < timeout {
+ poolInstances, err := listPoolInstances(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+
+ instancesCount := 0
+ for _, instance := range poolInstances {
+ if instance.Status == status && instance.RunnerStatus == runnerStatus {
+ instancesCount++
+ }
+ }
+
+ t.Logf(
+			"Pool with pool_id %s: waiting for status %v and runner_status %v, matching_instance_count %d, total_instance_count %d",
+ poolID, status, runnerStatus, instancesCount,
+ len(poolInstances))
+ if pool.MinIdleRunnersAsInt() == instancesCount {
+ return nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ err = suite.dumpPoolInstancesDetails(pool.ID)
+ suite.NoError(err, "error dumping pool instances details")
+
+ return fmt.Errorf("timeout waiting for pool %s instances to reach status: %s and runner status: %s", poolID, status, runnerStatus)
+}
+
+func (suite *GarmSuite) dumpPoolInstancesDetails(poolID string) error {
+ t := suite.T()
+ pool, err := getPool(suite.cli, suite.authToken, poolID)
+ if err != nil {
+ return err
+ }
+ if err := printJSONResponse(pool); err != nil {
+ return err
+ }
+ for _, instance := range pool.Instances {
+ instanceDetails, err := getInstance(suite.cli, suite.authToken, instance.Name)
+ if err != nil {
+ return err
+ }
+ t.Logf("Instance details: instance_name %s", instance.Name)
+ if err := printJSONResponse(instanceDetails); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (suite *GarmSuite) DisableRepoPool(repoID, repoPoolID string) {
+ t := suite.T()
+ t.Logf("Disable repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+ enabled := false
+ poolParams := params.UpdatePoolParams{Enabled: &enabled}
+ _, err := updateRepoPool(suite.cli, suite.authToken, repoID, repoPoolID, poolParams)
+ suite.NoError(err, "error disabling repository pool")
+}
+
+func (suite *GarmSuite) DeleteInstance(name string, forceRemove, bypassGHUnauthorized bool) {
+ t := suite.T()
+ t.Logf("Delete instance %s with force_remove %t", name, forceRemove)
+ err := deleteInstance(suite.cli, suite.authToken, name, forceRemove, bypassGHUnauthorized)
+ suite.NoError(err, "error deleting instance", name)
+ t.Logf("Instance deletion initiated for instance %s", name)
+}
+
+func (suite *GarmSuite) WaitInstanceToBeRemoved(name string, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var instance *params.Instance
+
+ t.Logf("Waiting for instance %s to be removed", name)
+ for timeWaited < timeout {
+ instances, err := listInstances(suite.cli, suite.authToken)
+ if err != nil {
+ return err
+ }
+
+ instance = nil
+ for k, v := range instances {
+ if v.Name == name {
+ instance = &instances[k]
+ break
+ }
+ }
+ if instance == nil {
+ // The instance is not found in the list. We can safely assume
+ // that it is removed
+ return nil
+ }
+
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ if err := printJSONResponse(*instance); err != nil {
+ return err
+ }
+ return fmt.Errorf("instance %s was not removed within the timeout", name)
+}
+
+func listPoolInstances(apiCli *client.GarmAPI, apiAuthToken runtime.ClientAuthInfoWriter, poolID string) (params.Instances, error) {
+ listPoolInstancesResponse, err := apiCli.Instances.ListPoolInstances(
+ clientInstances.NewListPoolInstancesParams().WithPoolID(poolID),
+ apiAuthToken)
+ if err != nil {
+ return nil, err
+ }
+ return listPoolInstancesResponse.Payload, nil
+}
diff --git a/test/integration/gh_cleanup/main.go b/test/integration/gh_cleanup/main.go
new file mode 100644
index 00000000..86d39ea7
--- /dev/null
+++ b/test/integration/gh_cleanup/main.go
@@ -0,0 +1,188 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+
+ "github.com/google/go-github/v72/github"
+ "golang.org/x/oauth2"
+)
+
+var (
+ orgName = os.Getenv("ORG_NAME")
+ repoName = os.Getenv("REPO_NAME")
+
+ ghToken = os.Getenv("GH_TOKEN")
+)
+
+func main() {
+ controllerID, ctrlIDFound := os.LookupEnv("GARM_CONTROLLER_ID")
+ if ctrlIDFound {
+ _ = GhOrgRunnersCleanup(ghToken, orgName, controllerID)
+ _ = GhRepoRunnersCleanup(ghToken, orgName, repoName, controllerID)
+ } else {
+ slog.Warn("Env variable GARM_CONTROLLER_ID is not set, skipping GitHub runners cleanup")
+ }
+
+ baseURL, baseURLFound := os.LookupEnv("GARM_BASE_URL")
+ if ctrlIDFound && baseURLFound {
+ webhookURL := fmt.Sprintf("%s/webhooks/%s", baseURL, controllerID)
+ _ = GhOrgWebhookCleanup(ghToken, webhookURL, orgName)
+ _ = GhRepoWebhookCleanup(ghToken, webhookURL, orgName, repoName)
+ } else {
+ slog.Warn("Env variables GARM_CONTROLLER_ID & GARM_BASE_URL are not set, skipping webhooks cleanup")
+ }
+}
+
+func GhOrgRunnersCleanup(ghToken, orgName, controllerID string) error {
+ slog.Info("Cleanup Github runners", "controller_id", controllerID, "org_name", orgName)
+
+ client := getGithubClient(ghToken)
+ ghOrgRunners, _, err := client.Actions.ListOrganizationRunners(context.Background(), orgName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization runners
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, orgRunner := range ghOrgRunners.Runners {
+ for _, label := range orgRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveOrganizationRunner(context.Background(), orgName, orgRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This
+ // is a best effort to try and remove all the orphan runners.
+ slog.With(slog.Any("error", err)).Info("Failed to remove organization runner", "org_runner", orgRunner.GetName())
+ break
+ }
+ slog.Info("Removed organization runner", "org_runner", orgRunner.GetName())
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func GhRepoRunnersCleanup(ghToken, orgName, repoName, controllerID string) error {
+ slog.Info("Cleanup Github runners", "controller_id", controllerID, "org_name", orgName, "repo_name", repoName)
+
+ client := getGithubClient(ghToken)
+ ghRepoRunners, _, err := client.Actions.ListRunners(context.Background(), orgName, repoName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove repository runners
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, repoRunner := range ghRepoRunners.Runners {
+ for _, label := range repoRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveRunner(context.Background(), orgName, repoName, repoRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This
+ // is a best effort to try and remove all the orphan runners.
+ slog.With(slog.Any("error", err)).Error("Failed to remove repository runner", "runner_name", repoRunner.GetName())
+ break
+ }
+ slog.Info("Removed repository runner", "runner_name", repoRunner.GetName())
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func GhOrgWebhookCleanup(ghToken, webhookURL, orgName string) error {
+ slog.Info("Cleanup Github webhook", "webhook_url", webhookURL, "org_name", orgName)
+ hook, err := getGhOrgWebhook(webhookURL, ghToken, orgName)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization webhook
+ if hook != nil {
+ client := getGithubClient(ghToken)
+ if _, err := client.Organizations.DeleteHook(context.Background(), orgName, hook.GetID()); err != nil {
+ return err
+ }
+ slog.Info("Github webhook removed", "webhook_url", webhookURL, "org_name", orgName)
+ }
+
+ return nil
+}
+
+func GhRepoWebhookCleanup(ghToken, webhookURL, orgName, repoName string) error {
+ slog.Info("Cleanup Github webhook", "webhook_url", webhookURL, "org_name", orgName, "repo_name", repoName)
+
+ hook, err := getGhRepoWebhook(webhookURL, ghToken, orgName, repoName)
+ if err != nil {
+ return err
+ }
+
+ // Remove repository webhook
+ if hook != nil {
+ client := getGithubClient(ghToken)
+ if _, err := client.Repositories.DeleteHook(context.Background(), orgName, repoName, hook.GetID()); err != nil {
+ return err
+ }
+		slog.Info("Github webhook removed", "webhook_url", webhookURL, "org_name", orgName, "repo_name", repoName)
+ }
+
+ return nil
+}
+
+func getGhOrgWebhook(url, ghToken, orgName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghOrgHooks, _, err := client.Organizations.ListHooks(context.Background(), orgName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghOrgHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func getGhRepoWebhook(url, ghToken, orgName, repoName string) (*github.Hook, error) {
+ client := getGithubClient(ghToken)
+ ghRepoHooks, _, err := client.Repositories.ListHooks(context.Background(), orgName, repoName, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, hook := range ghRepoHooks {
+ hookURL := hook.Config.GetURL()
+ if hookURL == url {
+ return hook, nil
+ }
+ }
+
+ return nil, nil
+}
+
+func getGithubClient(oauthToken string) *github.Client {
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: oauthToken})
+ tc := oauth2.NewClient(context.Background(), ts)
+ return github.NewClient(tc)
+}
diff --git a/test/integration/jobs_test.go b/test/integration/jobs_test.go
new file mode 100644
index 00000000..4b2d9d5d
--- /dev/null
+++ b/test/integration/jobs_test.go
@@ -0,0 +1,181 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+// TestWorkflowJobs triggers the test workflow once against the org-level
+// runner pool and once against the repo-level pool, validating the full
+// GARM job lifecycle for each label.
+func (suite *GarmSuite) TestWorkflowJobs() {
+	suite.TriggerWorkflow(suite.ghToken, orgName, repoName, workflowFileName, "org-runner")
+	suite.ValidateJobLifecycle("org-runner")
+
+	suite.TriggerWorkflow(suite.ghToken, orgName, repoName, workflowFileName, "repo-runner")
+	suite.ValidateJobLifecycle("repo-runner")
+}
+
+// TriggerWorkflow dispatches the given workflow file on the repository's
+// main branch, passing the runner label (and a fixed sleep time) as
+// workflow inputs so the job lands on the expected pool.
+func (suite *GarmSuite) TriggerWorkflow(ghToken, orgName, repoName, workflowFileName, labelName string) {
+	t := suite.T()
+	t.Logf("Trigger workflow with label %s", labelName)
+
+	client := getGithubClient(ghToken)
+	eventReq := github.CreateWorkflowDispatchEventRequest{
+		Ref: "main",
+		Inputs: map[string]interface{}{
+			// sleep_time keeps the job running long enough for the
+			// lifecycle checks to observe the "in_progress" state.
+			"sleep_time":   "50",
+			"runner_label": labelName,
+		},
+	}
+	_, err := client.Actions.CreateWorkflowDispatchEventByFileName(context.Background(), orgName, repoName, workflowFileName, eventReq)
+	suite.NoError(err, "error triggering workflow")
+}
+
+// ValidateJobLifecycle follows one labelled job through its whole life:
+// queued -> in progress -> completed, checks that the backing instance runs
+// while the job does, is removed afterwards, and that the pool is rebuilt
+// back to its idle-instance target.
+func (suite *GarmSuite) ValidateJobLifecycle(label string) {
+	t := suite.T()
+	t.Logf("Validate GARM job lifecycle with label %s", label)
+
+	// wait for job list to be updated
+	job, err := suite.waitLabelledJob(label, 4*time.Minute)
+	suite.NoError(err, "error waiting for job to be created")
+
+	// check expected job status
+	job, err = suite.waitJobStatus(job.ID, params.JobStatusQueued, 4*time.Minute)
+	suite.NoError(err, "error waiting for job to be queued")
+
+	job, err = suite.waitJobStatus(job.ID, params.JobStatusInProgress, 4*time.Minute)
+	suite.NoError(err, "error waiting for job to be in progress")
+
+	// check expected instance status
+	instance, err := suite.waitInstanceStatus(job.RunnerName, commonParams.InstanceRunning, params.RunnerActive, 5*time.Minute)
+	suite.NoError(err, "error waiting for instance to be running")
+
+	// wait for job to be completed
+	_, err = suite.waitJobStatus(job.ID, params.JobStatusCompleted, 4*time.Minute)
+	suite.NoError(err, "error waiting for job to be completed")
+
+	// wait for instance to be removed
+	err = suite.WaitInstanceToBeRemoved(instance.Name, 5*time.Minute)
+	suite.NoError(err, "error waiting for instance to be removed")
+
+	// wait for GARM to rebuild the pool running idle instances
+	err = suite.WaitPoolInstances(instance.PoolID, commonParams.InstanceRunning, params.RunnerIdle, 5*time.Minute)
+	suite.NoError(err, "error waiting for pool instances to be running idle")
+}
+
+// waitLabelledJob polls the GARM job list (every 5s, up to timeout) until a
+// job carrying the given label appears, returning it. On timeout it dumps
+// the last job listing and returns an error.
+func (suite *GarmSuite) waitLabelledJob(label string, timeout time.Duration) (*params.Job, error) {
+	t := suite.T()
+	var timeWaited time.Duration // default is 0
+	var jobs params.Jobs
+	var err error
+
+	t.Logf("Waiting for job with label %s", label)
+	for timeWaited < timeout {
+		jobs, err = listJobs(suite.cli, suite.authToken)
+		if err != nil {
+			return nil, err
+		}
+		for _, job := range jobs {
+			for _, jobLabel := range job.Labels {
+				if jobLabel == label {
+					// err is nil here; returning the loop copy's address is
+					// safe because we return immediately.
+					return &job, err
+				}
+			}
+		}
+		time.Sleep(5 * time.Second)
+		timeWaited += 5 * time.Second
+	}
+
+	// Timed out: dump the last listing to aid debugging before erroring.
+	if err := printJSONResponse(jobs); err != nil {
+		return nil, err
+	}
+	return nil, fmt.Errorf("failed to wait job with label %s", label)
+}
+
+// waitJobStatus polls the GARM job list (every 5s, up to timeout) until the
+// job with the given ID reaches the expected status. A job missing from the
+// list is treated as completed when the expected status is
+// JobStatusCompleted; otherwise its absence is an error.
+func (suite *GarmSuite) waitJobStatus(id int64, status params.JobStatus, timeout time.Duration) (*params.Job, error) {
+	t := suite.T()
+	var timeWaited time.Duration // default is 0
+	var job *params.Job
+
+	t.Logf("Waiting for job %d to reach status %v", id, status)
+	for timeWaited < timeout {
+		jobs, err := listJobs(suite.cli, suite.authToken)
+		if err != nil {
+			return nil, err
+		}
+
+		job = nil
+		for k, v := range jobs {
+			if v.ID == id {
+				job = &jobs[k]
+				break
+			}
+		}
+
+		if job == nil {
+			if status == params.JobStatusCompleted {
+				// The job is not found in the list. We can safely assume
+				// that it is completed
+				return nil, nil
+			}
+			// if the job is not found, and expected status is not "completed",
+			// we need to error out.
+			return nil, fmt.Errorf("job %d not found, expected to be found in status %s", id, status)
+		} else if job.Status == string(status) {
+			return job, nil
+		}
+		time.Sleep(5 * time.Second)
+		timeWaited += 5 * time.Second
+	}
+
+	// job is nil here only when the loop never ran (non-positive timeout);
+	// guard the dereference instead of panicking.
+	if job != nil {
+		if err := printJSONResponse(*job); err != nil {
+			return nil, err
+		}
+	}
+	return nil, fmt.Errorf("timeout waiting for job %d to reach status %s", id, status)
+}
+
+// waitInstanceStatus polls a GARM instance (every 5s, up to timeout) until
+// it reaches both the desired provider status and the desired runner
+// status. Any error from getInstance aborts the wait immediately.
+func (suite *GarmSuite) waitInstanceStatus(name string, status commonParams.InstanceStatus, runnerStatus params.RunnerStatus, timeout time.Duration) (*params.Instance, error) {
+	t := suite.T()
+	var timeWaited time.Duration // default is 0
+	var instance *params.Instance
+	var err error
+
+	t.Logf("Waiting for instance %s to reach desired status %v and desired runner status %v", name, status, runnerStatus)
+	for timeWaited < timeout {
+		instance, err = getInstance(suite.cli, suite.authToken, name)
+		if err != nil {
+			return nil, err
+		}
+		t.Logf("Instance %s has status %v and runner status %v", name, instance.Status, instance.RunnerStatus)
+		if instance.Status == status && instance.RunnerStatus == runnerStatus {
+			return instance, nil
+		}
+		time.Sleep(5 * time.Second)
+		timeWaited += 5 * time.Second
+	}
+
+	// instance is nil here only when the loop never ran (non-positive
+	// timeout); guard the dereference instead of panicking.
+	if instance != nil {
+		if err := printJSONResponse(*instance); err != nil {
+			return nil, err
+		}
+	}
+	return nil, fmt.Errorf("timeout waiting for instance %s status to reach status %s and runner status %s", name, status, runnerStatus)
+}
diff --git a/test/integration/list_info_test.go b/test/integration/list_info_test.go
new file mode 100644
index 00000000..ddb3ff86
--- /dev/null
+++ b/test/integration/list_info_test.go
@@ -0,0 +1,85 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/cloudbase/garm/params"
+)
+
+// TestGetControllerInfo verifies that the controller reports a non-empty
+// controller ID.
+func (suite *GarmSuite) TestGetControllerInfo() {
+	controllerInfo := suite.GetControllerInfo()
+	suite.NotEmpty(controllerInfo.ControllerID, "controller ID is empty")
+}
+
+// GetMetricsToken fetches a metrics token from GARM and asserts it is
+// non-empty.
+func (suite *GarmSuite) GetMetricsToken() {
+	t := suite.T()
+	t.Log("Get metrics token")
+	metricsToken, err := getMetricsToken(suite.cli, suite.authToken)
+	suite.NoError(err, "error getting metrics token")
+	suite.NotEmpty(metricsToken, "metrics token is empty")
+}
+
+// GetControllerInfo fetches the controller info, exports the controller ID
+// to $GITHUB_ENV (when running in a workflow), dumps the info as JSON, and
+// returns it.
+func (suite *GarmSuite) GetControllerInfo() *params.ControllerInfo {
+	t := suite.T()
+	t.Log("Get controller info")
+	controllerInfo, err := getControllerInfo(suite.cli, suite.authToken)
+	suite.NoError(err, "error getting controller info")
+	err = suite.appendCtrlInfoToGitHubEnv(&controllerInfo)
+	suite.NoError(err, "error appending controller info to GitHub env")
+	err = printJSONResponse(controllerInfo)
+	suite.NoError(err, "error printing controller info")
+	return &controllerInfo
+}
+
+// TestListCredentials verifies that at least one set of credentials is
+// configured in GARM.
+func (suite *GarmSuite) TestListCredentials() {
+	t := suite.T()
+	t.Log("List credentials")
+	credentials, err := listCredentials(suite.cli, suite.authToken)
+	suite.NoError(err, "error listing credentials")
+	suite.NotEmpty(credentials, "credentials list is empty")
+}
+
+// TestListProviders verifies that at least one provider is configured in
+// GARM.
+func (suite *GarmSuite) TestListProviders() {
+	t := suite.T()
+	t.Log("List providers")
+	providers, err := listProviders(suite.cli, suite.authToken)
+	suite.NoError(err, "error listing providers")
+	suite.NotEmpty(providers, "providers list is empty")
+}
+
+// appendCtrlInfoToGitHubEnv appends the GARM controller ID to the file
+// referenced by $GITHUB_ENV so later workflow steps (which source it) can
+// consume it. It is a no-op when GITHUB_ENV is not set, e.g. local runs.
+func (suite *GarmSuite) appendCtrlInfoToGitHubEnv(controllerInfo *params.ControllerInfo) error {
+	t := suite.T()
+	envFile, found := os.LookupEnv("GITHUB_ENV")
+	if !found {
+		t.Log("GITHUB_ENV not set, skipping appending controller info")
+		return nil
+	}
+	file, err := os.OpenFile(envFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o644)
+	if err != nil {
+		return err
+	}
+	// Close the handle when this function returns, rather than deferring to
+	// t.Cleanup which would hold it open until the end of the whole test.
+	defer file.Close()
+	if _, err := file.WriteString(fmt.Sprintf("export GARM_CONTROLLER_ID=%s\n", controllerInfo.ControllerID)); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/test/integration/organizations_test.go b/test/integration/organizations_test.go
new file mode 100644
index 00000000..d587f4a5
--- /dev/null
+++ b/test/integration/organizations_test.go
@@ -0,0 +1,205 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+// TestOrganizations exercises the organization lifecycle end to end:
+// create/update the org, install/uninstall/reinstall its webhook (verifying
+// each step against GitHub), then create/get/delete/update an org pool and
+// wait for it to settle with running idle instances.
+func (suite *GarmSuite) TestOrganizations() {
+	organization := suite.CreateOrg(orgName, suite.credentialsName, orgWebhookSecret)
+	org := suite.UpdateOrg(organization.ID, fmt.Sprintf("%s-clone", suite.credentialsName))
+	suite.NotEqual(organization, org, "organization not updated")
+	orgHookInfo := suite.InstallOrgWebhook(org.ID)
+	suite.ValidateOrgWebhookInstalled(suite.ghToken, orgHookInfo.URL, orgName)
+	suite.UninstallOrgWebhook(org.ID)
+	suite.ValidateOrgWebhookUninstalled(suite.ghToken, orgHookInfo.URL, orgName)
+	// Reinstall; the original hook URL is reused for validation — assumes
+	// the reinstalled hook gets the same URL (TODO confirm).
+	_ = suite.InstallOrgWebhook(org.ID)
+	suite.ValidateOrgWebhookInstalled(suite.ghToken, orgHookInfo.URL, orgName)
+
+	orgPoolParams := params.CreatePoolParams{
+		MaxRunners:     2,
+		MinIdleRunners: 0,
+		Flavor:         "default",
+		Image:          "ubuntu:24.04",
+		OSType:         commonParams.Linux,
+		OSArch:         commonParams.Amd64,
+		ProviderName:   "lxd_local",
+		Tags:           []string{"org-runner"},
+		Enabled:        true,
+	}
+	orgPool := suite.CreateOrgPool(org.ID, orgPoolParams)
+	orgPoolGot := suite.GetOrgPool(org.ID, orgPool.ID)
+	suite.Equal(orgPool, orgPoolGot, "organization pool mismatch")
+	suite.DeleteOrgPool(org.ID, orgPool.ID)
+
+	orgPool = suite.CreateOrgPool(org.ID, orgPoolParams)
+	orgPoolUpdated := suite.UpdateOrgPool(org.ID, orgPool.ID, orgPoolParams.MaxRunners, 1)
+	suite.NotEqual(orgPool, orgPoolUpdated, "organization pool not updated")
+
+	suite.WaitOrgRunningIdleInstances(org.ID, 6*time.Minute)
+}
+
+// CreateOrg registers a GitHub organization with GARM and returns it.
+func (suite *GarmSuite) CreateOrg(orgName, credentialsName, orgWebhookSecret string) *params.Organization {
+	t := suite.T()
+	t.Logf("Create org with org_name %s", orgName)
+	orgParams := params.CreateOrgParams{
+		Name:            orgName,
+		CredentialsName: credentialsName,
+		WebhookSecret:   orgWebhookSecret,
+	}
+	org, err := createOrg(suite.cli, suite.authToken, orgParams)
+	suite.NoError(err, "error creating organization")
+	return org
+}
+
+// UpdateOrg switches the organization's credentials and returns the updated
+// organization.
+func (suite *GarmSuite) UpdateOrg(id, credentialsName string) *params.Organization {
+	t := suite.T()
+	t.Logf("Update org with org_id %s", id)
+	updateParams := params.UpdateEntityParams{
+		CredentialsName: credentialsName,
+	}
+	org, err := updateOrg(suite.cli, suite.authToken, id, updateParams)
+	suite.NoError(err, "error updating organization")
+	return org
+}
+
+// InstallOrgWebhook installs the GARM-managed (direct) webhook on the
+// organization and returns the resulting hook info.
+func (suite *GarmSuite) InstallOrgWebhook(id string) *params.HookInfo {
+	t := suite.T()
+	t.Logf("Install org webhook with org_id %s", id)
+	webhookParams := params.InstallWebhookParams{
+		WebhookEndpointType: params.WebhookEndpointDirect,
+	}
+	_, err := installOrgWebhook(suite.cli, suite.authToken, id, webhookParams)
+	suite.NoError(err, "error installing organization webhook")
+	webhookInfo, err := getOrgWebhook(suite.cli, suite.authToken, id)
+	suite.NoError(err, "error getting organization webhook")
+	return webhookInfo
+}
+
+// ValidateOrgWebhookInstalled asserts, via the GitHub API, that a hook with
+// the given URL exists on the organization.
+func (suite *GarmSuite) ValidateOrgWebhookInstalled(ghToken, url, orgName string) {
+	hook, err := getGhOrgWebhook(url, ghToken, orgName)
+	suite.NoError(err, "error getting github webhook")
+	suite.NotNil(hook, "github webhook with url %s, for org %s was not properly installed", url, orgName)
+}
+
+// getGhOrgWebhook looks up the organization webhook whose configured URL is
+// url. It returns nil with a nil error when no hook matches.
+func getGhOrgWebhook(url, ghToken, orgName string) (*github.Hook, error) {
+	hooks, _, err := getGithubClient(ghToken).Organizations.ListHooks(context.Background(), orgName, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, candidate := range hooks {
+		if candidate.Config.GetURL() == url {
+			return candidate, nil
+		}
+	}
+
+	return nil, nil
+}
+
+// UninstallOrgWebhook removes the GARM-managed webhook from the
+// organization.
+func (suite *GarmSuite) UninstallOrgWebhook(id string) {
+	t := suite.T()
+	t.Logf("Uninstall org webhook with org_id %s", id)
+	err := uninstallOrgWebhook(suite.cli, suite.authToken, id)
+	suite.NoError(err, "error uninstalling organization webhook")
+}
+
+// ValidateOrgWebhookUninstalled asserts, via the GitHub API, that no hook
+// with the given URL remains on the organization.
+func (suite *GarmSuite) ValidateOrgWebhookUninstalled(ghToken, url, orgName string) {
+	hook, err := getGhOrgWebhook(url, ghToken, orgName)
+	suite.NoError(err, "error getting github webhook")
+	suite.Nil(hook, "github webhook with url %s, for org %s was not properly uninstalled", url, orgName)
+}
+
+// CreateOrgPool creates a runner pool for the organization and returns it.
+func (suite *GarmSuite) CreateOrgPool(orgID string, poolParams params.CreatePoolParams) *params.Pool {
+	t := suite.T()
+	t.Logf("Create org pool with org_id %s", orgID)
+	pool, err := createOrgPool(suite.cli, suite.authToken, orgID, poolParams)
+	suite.NoError(err, "error creating organization pool")
+	return pool
+}
+
+// GetOrgPool fetches a specific pool of the organization.
+func (suite *GarmSuite) GetOrgPool(orgID, orgPoolID string) *params.Pool {
+	t := suite.T()
+	t.Logf("Get org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+	pool, err := getOrgPool(suite.cli, suite.authToken, orgID, orgPoolID)
+	suite.NoError(err, "error getting organization pool")
+	return pool
+}
+
+// DeleteOrgPool deletes a specific pool of the organization.
+func (suite *GarmSuite) DeleteOrgPool(orgID, orgPoolID string) {
+	t := suite.T()
+	t.Logf("Delete org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+	err := deleteOrgPool(suite.cli, suite.authToken, orgID, orgPoolID)
+	suite.NoError(err, "error deleting organization pool")
+}
+
+// UpdateOrgPool updates the pool's max-runner and min-idle-runner counts
+// and returns the updated pool.
+func (suite *GarmSuite) UpdateOrgPool(orgID, orgPoolID string, maxRunners, minIdleRunners uint) *params.Pool {
+	t := suite.T()
+	t.Logf("Update org pool with org_id %s and pool_id %s", orgID, orgPoolID)
+	poolParams := params.UpdatePoolParams{
+		MinIdleRunners: &minIdleRunners,
+		MaxRunners:     &maxRunners,
+	}
+	pool, err := updateOrgPool(suite.cli, suite.authToken, orgID, orgPoolID, poolParams)
+	suite.NoError(err, "error updating organization pool")
+	return pool
+}
+
+// WaitOrgRunningIdleInstances waits, for every pool of the organization,
+// until its instances are running and idle; on timeout it dumps the org's
+// instance details and fails the test.
+func (suite *GarmSuite) WaitOrgRunningIdleInstances(orgID string, timeout time.Duration) {
+	t := suite.T()
+	orgPools, err := listOrgPools(suite.cli, suite.authToken, orgID)
+	suite.NoError(err, "error listing organization pools")
+	for _, pool := range orgPools {
+		err := suite.WaitPoolInstances(pool.ID, commonParams.InstanceRunning, params.RunnerIdle, timeout)
+		if err != nil {
+			suite.dumpOrgInstancesDetails(orgID)
+			t.Errorf("timeout waiting for organization %s instances to reach status: %s and runner status: %s", orgID, commonParams.InstanceRunning, params.RunnerIdle)
+		}
+	}
+}
+
+// dumpOrgInstancesDetails logs the organization's details and each of its
+// instances as JSON, as a debugging aid when a wait times out.
+func (suite *GarmSuite) dumpOrgInstancesDetails(orgID string) {
+	t := suite.T()
+	// print org details
+	t.Logf("Dumping org details with org_id %s", orgID)
+	org, err := getOrg(suite.cli, suite.authToken, orgID)
+	suite.NoError(err, "error getting organization")
+	err = printJSONResponse(org)
+	suite.NoError(err, "error printing organization")
+
+	// print org instances details
+	t.Logf("Dumping org instances details for org %s", orgID)
+	instances, err := listOrgInstances(suite.cli, suite.authToken, orgID)
+	suite.NoError(err, "error listing organization instances")
+	for _, instance := range instances {
+		instance, err := getInstance(suite.cli, suite.authToken, instance.Name)
+		suite.NoError(err, "error getting instance")
+		// Fixed "instace" typo in the log message.
+		t.Logf("Instance info for instance %s", instance.Name)
+		err = printJSONResponse(instance)
+		suite.NoError(err, "error printing instance")
+	}
+}
diff --git a/test/integration/provider/garm-external-provider b/test/integration/provider/garm-external-provider
new file mode 100755
index 00000000..88e6f46e
--- /dev/null
+++ b/test/integration/provider/garm-external-provider
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# Dummy GARM external provider used by the integration tests. It dispatches
+# on $GARM_COMMAND and only implements CreateInstance for real; the rest
+# either echo a placeholder or report "not implemented".
+
+set -e
+set -o pipefail
+
+# Provider payload arrives on stdin when it is not a TTY.
+if [ ! -t 0 ]
+then
+    INPUT=$(cat -)
+fi
+
+if [ -z "$GARM_PROVIDER_CONFIG_FILE" ]
+then
+    echo "no config file specified in env"
+    exit 1
+fi
+
+source "$GARM_PROVIDER_CONFIG_FILE"
+
+function CreateInstance() {
+    if [ -z "$INPUT" ]; then
+        echo "expected build params in stdin"
+        exit 1
+    fi
+
+    jq -rnc '{"provider_id": "test-provider-id", "name": "test-instance-name", "os_type": "linux", "os_name": "ubuntu", "os_version": "20.04", "os_arch": "x86_64", "status": "running"}'
+}
+
+case "$GARM_COMMAND" in
+    "CreateInstance")
+        CreateInstance
+        ;;
+    "DeleteInstance")
+        # Fixed message: this branch previously claimed "RemoveAllInstances
+        # not implemented".
+        echo "DeleteInstance not implemented"
+        exit 1
+        ;;
+    "GetInstance")
+        echo "Get instance with id: ${GARM_INSTANCE_ID}"
+        ;;
+    "ListInstances")
+        echo "List instances with pool id: ${GARM_POOL_ID}"
+        ;;
+    "StartInstance")
+        echo "Start instance: ${GARM_INSTANCE_NAME} with id: ${GARM_INSTANCE_ID}"
+        ;;
+    "StopInstance")
+        echo "Stop instance: ${GARM_INSTANCE_NAME} with id: ${GARM_INSTANCE_ID}"
+        ;;
+    "RemoveAllInstances")
+        echo "RemoveAllInstances not implemented"
+        exit 1
+        ;;
+    *)
+        echo "Invalid GARM provider command: \"$GARM_COMMAND\""
+        exit 1
+        ;;
+esac
diff --git a/test/integration/repositories_test.go b/test/integration/repositories_test.go
new file mode 100644
index 00000000..1b0558f9
--- /dev/null
+++ b/test/integration/repositories_test.go
@@ -0,0 +1,221 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+ "golang.org/x/oauth2"
+
+ commonParams "github.com/cloudbase/garm-provider-common/params"
+ "github.com/cloudbase/garm/params"
+)
+
+// EnsureTestCredentials creates two PAT-based GitHub credentials in GARM:
+// one named `name` and a second named `name`-clone (used by the update
+// tests to switch credentials).
+func (suite *GarmSuite) EnsureTestCredentials(name string, oauthToken string, endpointName string) {
+	t := suite.T()
+	t.Log("Ensuring test credentials exist")
+	createCredsParams := params.CreateGithubCredentialsParams{
+		Name:        name,
+		Endpoint:    endpointName,
+		Description: "GARM test credentials",
+		AuthType:    params.ForgeAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: oauthToken,
+		},
+	}
+	suite.CreateGithubCredentials(createCredsParams)
+
+	// Second copy with a "-clone" suffix, for credential-switch tests.
+	createCredsParams.Name = fmt.Sprintf("%s-clone", name)
+	suite.CreateGithubCredentials(createCredsParams)
+}
+
+// TestRepositories exercises the repository lifecycle: swap the repo's
+// credentials to the "-clone" set, install/uninstall/reinstall the repo
+// webhook (verifying against GitHub each time), then create/get/delete/
+// update a repo pool and wait for it to settle with running idle instances.
+func (suite *GarmSuite) TestRepositories() {
+	t := suite.T()
+
+	t.Logf("Update repo with repo_id %s", suite.repo.ID)
+	updateParams := params.UpdateEntityParams{
+		CredentialsName: fmt.Sprintf("%s-clone", suite.credentialsName),
+	}
+	repo, err := updateRepo(suite.cli, suite.authToken, suite.repo.ID, updateParams)
+	suite.NoError(err, "error updating repository")
+	suite.Equal(fmt.Sprintf("%s-clone", suite.credentialsName), repo.CredentialsName, "credentials name mismatch")
+	suite.repo = repo
+
+	hookRepoInfo := suite.InstallRepoWebhook(suite.repo.ID)
+	suite.ValidateRepoWebhookInstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+	suite.UninstallRepoWebhook(suite.repo.ID)
+	suite.ValidateRepoWebhookUninstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+
+	// Reinstall; validation reuses the original hook URL — assumes the
+	// reinstalled hook gets the same URL (TODO confirm).
+	suite.InstallRepoWebhook(suite.repo.ID)
+	suite.ValidateRepoWebhookInstalled(suite.ghToken, hookRepoInfo.URL, orgName, repoName)
+
+	repoPoolParams := params.CreatePoolParams{
+		MaxRunners:     2,
+		MinIdleRunners: 0,
+		Flavor:         "default",
+		Image:          "ubuntu:24.04",
+		OSType:         commonParams.Linux,
+		OSArch:         commonParams.Amd64,
+		ProviderName:   "lxd_local",
+		Tags:           []string{"repo-runner"},
+		Enabled:        true,
+	}
+
+	repoPool := suite.CreateRepoPool(suite.repo.ID, repoPoolParams)
+	suite.Equal(repoPool.MaxRunners, repoPoolParams.MaxRunners, "max runners mismatch")
+	suite.Equal(repoPool.MinIdleRunners, repoPoolParams.MinIdleRunners, "min idle runners mismatch")
+
+	repoPoolGet := suite.GetRepoPool(suite.repo.ID, repoPool.ID)
+	suite.Equal(*repoPool, *repoPoolGet, "pool get mismatch")
+
+	suite.DeleteRepoPool(suite.repo.ID, repoPool.ID)
+
+	repoPool = suite.CreateRepoPool(suite.repo.ID, repoPoolParams)
+	updatedRepoPool := suite.UpdateRepoPool(suite.repo.ID, repoPool.ID, repoPoolParams.MaxRunners, 1)
+	suite.NotEqual(updatedRepoPool.MinIdleRunners, repoPool.MinIdleRunners, "min idle runners mismatch")
+
+	suite.WaitRepoRunningIdleInstances(suite.repo.ID, 6*time.Minute)
+}
+
+// InstallRepoWebhook installs the GARM-managed (direct) webhook on the
+// repository and returns the resulting hook info.
+func (suite *GarmSuite) InstallRepoWebhook(id string) *params.HookInfo {
+	t := suite.T()
+	t.Logf("Install repo webhook with repo_id %s", id)
+	webhookParams := params.InstallWebhookParams{
+		WebhookEndpointType: params.WebhookEndpointDirect,
+	}
+	_, err := installRepoWebhook(suite.cli, suite.authToken, id, webhookParams)
+	suite.NoError(err, "error installing repository webhook")
+
+	webhookInfo, err := getRepoWebhook(suite.cli, suite.authToken, id)
+	suite.NoError(err, "error getting repository webhook")
+	return webhookInfo
+}
+
+// ValidateRepoWebhookInstalled asserts, via the GitHub API, that a hook
+// with the given URL exists on the repository.
+func (suite *GarmSuite) ValidateRepoWebhookInstalled(ghToken, url, orgName, repoName string) {
+	hook, err := getGhRepoWebhook(url, ghToken, orgName, repoName)
+	suite.NoError(err, "error getting github webhook")
+	suite.NotNil(hook, "github webhook with url %s, for repo %s/%s was not properly installed", url, orgName, repoName)
+}
+
+// getGhRepoWebhook looks up the repository webhook whose configured URL is
+// url. It returns nil with a nil error when no hook matches.
+func getGhRepoWebhook(url, ghToken, orgName, repoName string) (*github.Hook, error) {
+	hooks, _, err := getGithubClient(ghToken).Repositories.ListHooks(context.Background(), orgName, repoName, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, candidate := range hooks {
+		if candidate.Config.GetURL() == url {
+			return candidate, nil
+		}
+	}
+
+	return nil, nil
+}
+
+// getGithubClient builds a GitHub API client authenticated with the given
+// OAuth2 token.
+func getGithubClient(oauthToken string) *github.Client {
+	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: oauthToken})
+	return github.NewClient(oauth2.NewClient(context.Background(), tokenSource))
+}
+
+// UninstallRepoWebhook removes the GARM-managed webhook from the
+// repository.
+func (suite *GarmSuite) UninstallRepoWebhook(id string) {
+	t := suite.T()
+	t.Logf("Uninstall repo webhook with repo_id %s", id)
+	err := uninstallRepoWebhook(suite.cli, suite.authToken, id)
+	suite.NoError(err, "error uninstalling repository webhook")
+}
+
+// ValidateRepoWebhookUninstalled asserts, via the GitHub API, that no hook
+// with the given URL remains on the repository.
+func (suite *GarmSuite) ValidateRepoWebhookUninstalled(ghToken, url, orgName, repoName string) {
+	hook, err := getGhRepoWebhook(url, ghToken, orgName, repoName)
+	suite.NoError(err, "error getting github webhook")
+	suite.Nil(hook, "github webhook with url %s, for repo %s/%s was not properly uninstalled", url, orgName, repoName)
+}
+
+// CreateRepoPool creates a runner pool for the repository and returns it.
+func (suite *GarmSuite) CreateRepoPool(repoID string, poolParams params.CreatePoolParams) *params.Pool {
+	t := suite.T()
+	t.Logf("Create repo pool with repo_id %s and pool_params %+v", repoID, poolParams)
+	pool, err := createRepoPool(suite.cli, suite.authToken, repoID, poolParams)
+	suite.NoError(err, "error creating repository pool")
+	return pool
+}
+
+// GetRepoPool fetches a specific pool of the repository.
+func (suite *GarmSuite) GetRepoPool(repoID, repoPoolID string) *params.Pool {
+	t := suite.T()
+	t.Logf("Get repo pool repo_id %s and pool_id %s", repoID, repoPoolID)
+	pool, err := getRepoPool(suite.cli, suite.authToken, repoID, repoPoolID)
+	suite.NoError(err, "error getting repository pool")
+	return pool
+}
+
+// DeleteRepoPool deletes a specific pool of the repository.
+func (suite *GarmSuite) DeleteRepoPool(repoID, repoPoolID string) {
+	t := suite.T()
+	t.Logf("Delete repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+	err := deleteRepoPool(suite.cli, suite.authToken, repoID, repoPoolID)
+	suite.NoError(err, "error deleting repository pool")
+}
+
+// UpdateRepoPool updates the pool's max-runner and min-idle-runner counts
+// and returns the updated pool.
+func (suite *GarmSuite) UpdateRepoPool(repoID, repoPoolID string, maxRunners, minIdleRunners uint) *params.Pool {
+	t := suite.T()
+	t.Logf("Update repo pool with repo_id %s and pool_id %s", repoID, repoPoolID)
+	poolParams := params.UpdatePoolParams{
+		MinIdleRunners: &minIdleRunners,
+		MaxRunners:     &maxRunners,
+	}
+	pool, err := updateRepoPool(suite.cli, suite.authToken, repoID, repoPoolID, poolParams)
+	suite.NoError(err, "error updating repository pool")
+	return pool
+}
+
+// WaitRepoRunningIdleInstances waits, for every pool of the repository,
+// until its instances are running and idle; on timeout it dumps the repo's
+// instance details and fails the test.
+func (suite *GarmSuite) WaitRepoRunningIdleInstances(repoID string, timeout time.Duration) {
+	t := suite.T()
+	repoPools, err := listRepoPools(suite.cli, suite.authToken, repoID)
+	suite.NoError(err, "error listing repo pools")
+	for _, pool := range repoPools {
+		err := suite.WaitPoolInstances(pool.ID, commonParams.InstanceRunning, params.RunnerIdle, timeout)
+		if err != nil {
+			suite.dumpRepoInstancesDetails(repoID)
+			t.Errorf("error waiting for pool instances to be running idle: %v", err)
+		}
+	}
+}
+
+// dumpRepoInstancesDetails logs the repository's details and each of its
+// instances as JSON, as a debugging aid when a wait times out.
+func (suite *GarmSuite) dumpRepoInstancesDetails(repoID string) {
+	t := suite.T()
+	// print repo details
+	t.Logf("Dumping repo details for repo %s", repoID)
+	repo, err := getRepo(suite.cli, suite.authToken, repoID)
+	suite.NoError(err, "error getting repo")
+	err = printJSONResponse(repo)
+	suite.NoError(err, "error printing repo")
+
+	// print repo instances details
+	t.Logf("Dumping repo instances details for repo %s", repoID)
+	instances, err := listRepoInstances(suite.cli, suite.authToken, repoID)
+	suite.NoError(err, "error listing repo instances")
+	for _, instance := range instances {
+		instance, err := getInstance(suite.cli, suite.authToken, instance.Name)
+		suite.NoError(err, "error getting instance")
+		t.Logf("Instance info for instance %s", instance.Name)
+		err = printJSONResponse(instance)
+		suite.NoError(err, "error printing instance")
+	}
+}
diff --git a/test/integration/scripts/setup-garm.sh b/test/integration/scripts/setup-garm.sh
new file mode 100755
index 00000000..40a61943
--- /dev/null
+++ b/test/integration/scripts/setup-garm.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+set -o errexit
+
+DIR="$(dirname $0)"
+BINARIES_DIR="$PWD/bin"
+CONTRIB_DIR="$PWD/contrib"
+export CONFIG_DIR="$PWD/test/integration/config"
+export CONFIG_DIR_PROV="$PWD/test/integration/provider"
+export GARM_CONFIG_DIR=${GARM_CONFIG_DIR:-$(mktemp -d)}
+export PROVIDER_BIN_DIR="$GARM_CONFIG_DIR/providers.d/lxd"
+export IS_GH_WORKFLOW=${IS_GH_WORKFLOW:-"true"}
+export LXD_PROVIDER_LOCATION=${LXD_PROVIDER_LOCATION:-""}
+export RUN_USER=${RUN_USER:-$USER}
+export GARM_PORT=${GARM_PORT:-"9997"}
+export GARM_SERVICE_NAME=${GARM_SERVICE_NAME:-"garm"}
+export GARM_CONFIG_FILE=${GARM_CONFIG_FILE:-"${GARM_CONFIG_DIR}/config.toml"}
+export LXD_REMOTE_SERVER=${LXD_REMOTE_SERVER:-"https://cloud-images.ubuntu.com/releases"}
+
+if [ -f "$GITHUB_ENV" ];then
+ echo "export GARM_CONFIG_DIR=${GARM_CONFIG_DIR}" >> $GITHUB_ENV
+ echo "export GARM_SERVICE_NAME=${GARM_SERVICE_NAME}" >> $GITHUB_ENV
+fi
+
+if [[ ! -f $BINARIES_DIR/garm ]] || [[ ! -f $BINARIES_DIR/garm-cli ]]; then
+ echo "ERROR: Please build GARM binaries first"
+ exit 1
+fi
+
+
+if [[ -z $GH_TOKEN ]]; then echo "ERROR: The env variable GH_TOKEN is not set"; exit 1; fi
+if [[ -z $CREDENTIALS_NAME ]]; then echo "ERROR: The env variable CREDENTIALS_NAME is not set"; exit 1; fi
+if [[ -z $GARM_BASE_URL ]]; then echo "ERROR: The env variable GARM_BASE_URL is not set"; exit 1; fi
+
+# Generate a random 32-char secret for JWT_AUTH_SECRET and DB_PASSPHRASE.
+function generate_secret() {
+ (tr -dc 'a-zA-Z0-9!@#$%^&*()_+?><~\`;' < /dev/urandom | head -c 32) 2>/dev/null
+}
+
+# Wait for a port to open at a given address.
+function wait_open_port() {
+ local ADDRESS="$1"
+ local PORT="$2"
+ local TIMEOUT=30
+ SECONDS=0
+ while true; do
+ if [[ $SECONDS -gt $TIMEOUT ]]; then
+ echo "ERROR: Port $PORT didn't open at $ADDRESS within $TIMEOUT seconds"
+ return 1
+ fi
+ nc -v -w 5 -z "$ADDRESS" "$PORT" &>/dev/null && break || sleep 1
+ done
+ echo "Port $PORT at address $ADDRESS is open"
+}
+
+export JWT_AUTH_SECRET="$(generate_secret)"
+export DB_PASSPHRASE="$(generate_secret)"
+
+if [ $IS_GH_WORKFLOW == "true" ]; then
+ # Group "adm" is the LXD daemon group as set by the "canonical/setup-lxd" GitHub action.
+ sudo useradd --shell /usr/bin/false --system --groups adm --no-create-home garm
+fi
+
+sudo mkdir -p ${GARM_CONFIG_DIR}
+sudo mkdir -p $PROVIDER_BIN_DIR
+sudo chown -R $RUN_USER:$RUN_USER ${PROVIDER_BIN_DIR}
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+export LXD_PROVIDER_EXECUTABLE="$PROVIDER_BIN_DIR/garm-provider-lxd"
+export LXD_PROVIDER_CONFIG="${GARM_CONFIG_DIR}/garm-provider-lxd.toml"
+cat $CONFIG_DIR/garm-provider-lxd.toml| envsubst | sudo tee $LXD_PROVIDER_CONFIG > /dev/null
+
+function clone_and_build_lxd_provider() {
+ git clone https://github.com/cloudbase/garm-provider-lxd ~/garm-provider-lxd
+ pushd ~/garm-provider-lxd
+ CGO_ENABLED=1 go build -o $LXD_PROVIDER_EXECUTABLE
+ popd
+}
+
+if [ $IS_GH_WORKFLOW == "true" ]; then
+ clone_and_build_lxd_provider
+else
+ if [ -z "$LXD_PROVIDER_LOCATION" ];then
+ clone_and_build_lxd_provider
+ else
+ cp $LXD_PROVIDER_LOCATION $LXD_PROVIDER_EXECUTABLE
+ fi
+
+fi
+
+cat $CONFIG_DIR/config.toml | envsubst | sudo tee ${GARM_CONFIG_DIR}/config.toml > /dev/null
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+sudo mkdir -p ${GARM_CONFIG_DIR}/test-provider
+sudo touch $CONFIG_DIR_PROV/config
+sudo cp $CONFIG_DIR_PROV/* ${GARM_CONFIG_DIR}/test-provider
+
+sudo mv $BINARIES_DIR/* /usr/local/bin/
+mkdir -p $HOME/.local/share/systemd/user/
+cat $CONFIG_DIR/garm.service| envsubst | sudo tee /lib/systemd/system/${GARM_SERVICE_NAME}@.service > /dev/null
+sudo chown -R $RUN_USER:$RUN_USER ${GARM_CONFIG_DIR}
+
+sudo systemctl daemon-reload
+sudo systemctl enable ${GARM_SERVICE_NAME}@${RUN_USER}
+sudo systemctl restart ${GARM_SERVICE_NAME}@${RUN_USER}
+wait_open_port 127.0.0.1 ${GARM_PORT}
+
+echo "GARM is up and running"
+echo "GARM config file is $GARM_CONFIG_FILE"
+echo "GARM service name is $GARM_SERVICE_NAME"
diff --git a/test/integration/scripts/taredown_garm.sh b/test/integration/scripts/taredown_garm.sh
new file mode 100755
index 00000000..c7b80a69
--- /dev/null
+++ b/test/integration/scripts/taredown_garm.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ -f "$GITHUB_ENV" ];then
+ source $GITHUB_ENV
+fi
+
+if [ -z $GARM_CONFIG_DIR ]; then
+ echo "ERROR: GARM_CONFIG_DIR is not set"
+ exit 1
+fi
+
+if [ -z $GARM_SERVICE_NAME ]; then
+ echo "ERROR: GARM_SERVICE_NAME is not set"
+ exit 1
+fi
+
+if [ -f "$HOME/.local/share/systemd/user/${GARM_SERVICE_NAME}.service" ];then
+ sudo systemctl stop $GARM_SERVICE_NAME@${RUN_USER}
+ sudo systemctl disable $GARM_SERVICE_NAME@${RUN_USER}
+ sudo rm /lib/systemd/system/${GARM_SERVICE_NAME}@.service
+ sudo systemctl daemon-reload
+fi
+
+if [ -d "$GARM_CONFIG_DIR" ] && [ -f "$GARM_CONFIG_DIR/config.toml" ] && [ -f "$GARM_CONFIG_DIR/garm-provider-lxd.toml" ];then
+ rm -rf ${GARM_CONFIG_DIR}
+fi
\ No newline at end of file
diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go
new file mode 100644
index 00000000..ca6b3030
--- /dev/null
+++ b/test/integration/suite_test.go
@@ -0,0 +1,225 @@
+//go:build integration
+// +build integration
+
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/go-openapi/runtime"
+ openapiRuntimeClient "github.com/go-openapi/runtime/client"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cloudbase/garm/client"
+ "github.com/cloudbase/garm/params"
+)
+
+var (
+ orgName string
+ repoName string
+ orgWebhookSecret string
+ workflowFileName string
+)
+
+type GarmSuite struct {
+ suite.Suite
+ cli *client.GarmAPI
+ authToken runtime.ClientAuthInfoWriter
+ ghToken string
+ credentialsName string
+ repo *params.Repository
+}
+
+func (suite *GarmSuite) SetupSuite() {
+ t := suite.T()
+ suite.ghToken = os.Getenv("GH_TOKEN")
+ orgWebhookSecret = os.Getenv("ORG_WEBHOOK_SECRET")
+ workflowFileName = os.Getenv("WORKFLOW_FILE_NAME")
+ baseURL := os.Getenv("GARM_BASE_URL")
+ adminPassword := os.Getenv("GARM_PASSWORD")
+ adminUsername := os.Getenv("GARM_ADMIN_USERNAME")
+ adminFullName := "GARM Admin"
+ adminEmail := "admin@example.com"
+ garmURL, err := url.Parse(baseURL)
+ suite.NoError(err, "error parsing GARM_BASE_URL")
+
+ apiPath, err := url.JoinPath(garmURL.Path, client.DefaultBasePath)
+ suite.NoError(err, "error joining path")
+
+ transportCfg := client.DefaultTransportConfig().
+ WithHost(garmURL.Host).
+ WithBasePath(apiPath).
+ WithSchemes([]string{garmURL.Scheme})
+ suite.cli = client.NewHTTPClientWithConfig(nil, transportCfg)
+
+ t.Log("First run")
+ newUser := params.NewUserParams{
+ Username: adminUsername,
+ Password: adminPassword,
+ FullName: adminFullName,
+ Email: adminEmail,
+ }
+ _, err = firstRun(suite.cli, newUser)
+ suite.NoError(err, "error at first run")
+
+ t.Log("Login")
+ loginParams := params.PasswordLoginParams{
+ Username: adminUsername,
+ Password: adminPassword,
+ }
+ token, err := login(suite.cli, loginParams)
+ suite.NoError(err, "error at login")
+ suite.authToken = openapiRuntimeClient.BearerToken(token)
+ t.Log("Log in successful")
+
+ suite.credentialsName = os.Getenv("CREDENTIALS_NAME")
+ suite.EnsureTestCredentials(suite.credentialsName, suite.ghToken, "github.com")
+
+ t.Log("Create repository")
+ orgName = os.Getenv("ORG_NAME")
+ repoName = os.Getenv("REPO_NAME")
+ repoWebhookSecret := os.Getenv("REPO_WEBHOOK_SECRET")
+ createParams := params.CreateRepoParams{
+ Owner: orgName,
+ Name: repoName,
+ CredentialsName: suite.credentialsName,
+ WebhookSecret: repoWebhookSecret,
+ }
+ suite.repo, err = createRepo(suite.cli, suite.authToken, createParams)
+ suite.NoError(err, "error creating repository")
+ suite.Equal(orgName, suite.repo.Owner, "owner name mismatch")
+ suite.Equal(repoName, suite.repo.Name, "repo name mismatch")
+ suite.Equal(suite.credentialsName, suite.repo.CredentialsName, "credentials name mismatch")
+}
+
+func (suite *GarmSuite) TearDownSuite() {
+ t := suite.T()
+ t.Log("Graceful cleanup")
+ // disable all the pools
+ pools, err := listPools(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing pools")
+ enabled := false
+ poolParams := params.UpdatePoolParams{Enabled: &enabled}
+ for _, pool := range pools {
+ _, err := updatePool(suite.cli, suite.authToken, pool.ID, poolParams)
+ suite.NoError(err, "error disabling pool")
+ t.Logf("Pool %s disabled during stage graceful_cleanup", pool.ID)
+ }
+
+ // delete all the instances
+ for _, pool := range pools {
+ poolInstances, err := listPoolInstances(suite.cli, suite.authToken, pool.ID)
+ suite.NoError(err, "error listing pool instances")
+ for _, instance := range poolInstances {
+ err := deleteInstance(suite.cli, suite.authToken, instance.Name, false, false)
+ suite.NoError(err, "error deleting instance")
+ t.Logf("Instance deletion initiated for instance %s during stage graceful_cleanup", instance.Name)
+ }
+ }
+
+ // wait for all instances to be deleted
+ for _, pool := range pools {
+ err := suite.waitPoolNoInstances(pool.ID, 3*time.Minute)
+ suite.NoError(err, "error waiting for pool to have no instances")
+ }
+
+ // delete all the pools
+ for _, pool := range pools {
+ err := deletePool(suite.cli, suite.authToken, pool.ID)
+ suite.NoError(err, "error deleting pool")
+ t.Logf("Pool %s deleted during stage graceful_cleanup", pool.ID)
+ }
+
+ // delete all the repositories
+ repos, err := listRepos(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing repositories")
+ for _, repo := range repos {
+ err := deleteRepo(suite.cli, suite.authToken, repo.ID)
+ suite.NoError(err, "error deleting repository")
+ t.Logf("Repo %s deleted during stage graceful_cleanup", repo.ID)
+ }
+
+ // delete all the organizations
+ orgs, err := listOrgs(suite.cli, suite.authToken)
+ suite.NoError(err, "error listing organizations")
+ for _, org := range orgs {
+ err := deleteOrg(suite.cli, suite.authToken, org.ID)
+ suite.NoError(err, "error deleting organization")
+ t.Logf("Org %s deleted during stage graceful_cleanup", org.ID)
+ }
+}
+
+func TestGarmTestSuite(t *testing.T) {
+ suite.Run(t, new(GarmSuite))
+}
+
+func (suite *GarmSuite) waitPoolNoInstances(id string, timeout time.Duration) error {
+ t := suite.T()
+ var timeWaited time.Duration // default is 0
+ var pool *params.Pool
+ var err error
+
+ t.Logf("Wait until pool with id %s has no instances", id)
+ for timeWaited < timeout {
+ pool, err = getPool(suite.cli, suite.authToken, id)
+ suite.NoError(err, "error getting pool")
+ t.Logf("Current pool has %d instances", len(pool.Instances))
+ if len(pool.Instances) == 0 {
+ return nil
+ }
+ time.Sleep(5 * time.Second)
+ timeWaited += 5 * time.Second
+ }
+
+ err = suite.dumpPoolInstancesDetails(pool.ID)
+ suite.NoError(err, "error dumping pool instances details")
+
+ return fmt.Errorf("failed to wait for pool %s to have no instances", pool.ID)
+}
+
+func (suite *GarmSuite) GhOrgRunnersCleanup(ghToken, orgName, controllerID string) error {
+ t := suite.T()
+ t.Logf("Cleanup Github runners for controller %s and org %s", controllerID, orgName)
+
+ client := getGithubClient(ghToken)
+ ghOrgRunners, _, err := client.Actions.ListOrganizationRunners(context.Background(), orgName, nil)
+ if err != nil {
+ return err
+ }
+
+ // Remove organization runners
+ controllerLabel := fmt.Sprintf("runner-controller-id:%s", controllerID)
+ for _, orgRunner := range ghOrgRunners.Runners {
+ for _, label := range orgRunner.Labels {
+ if label.GetName() == controllerLabel {
+ if _, err := client.Actions.RemoveOrganizationRunner(context.Background(), orgName, orgRunner.GetID()); err != nil {
+ // We don't fail if we can't remove a single runner. This
+ // is a best effort to try and remove all the orphan runners.
+ t.Logf("Failed to remove organization runner %s: %v", orgRunner.GetName(), err)
+ break
+ }
+ t.Logf("Removed organization runner %s", orgRunner.GetName())
+ break
+ }
+ }
+ }
+ return nil
+}
diff --git a/test/integration/utils.go b/test/integration/utils.go
new file mode 100644
index 00000000..1fa35b5e
--- /dev/null
+++ b/test/integration/utils.go
@@ -0,0 +1,48 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+package integration
+
+import (
+ "encoding/json"
+ "fmt"
+ "log/slog"
+)
+
+func printJSONResponse(resp interface{}) error {
+ b, err := json.MarshalIndent(resp, "", " ")
+ if err != nil {
+ return err
+ }
+ slog.Info(string(b))
+ return nil
+}
+
+type apiCodeGetter interface {
+ IsCode(code int) bool
+}
+
+func expectAPIStatusCode(err error, expectedCode int) error {
+ if err == nil {
+ return fmt.Errorf("expected error, got nil")
+ }
+ apiErr, ok := err.(apiCodeGetter)
+ if !ok {
+ return fmt.Errorf("expected API error, got %v (%T)", err, err)
+ }
+ if !apiErr.IsCode(expectedCode) {
+ return fmt.Errorf("expected status code %d: %v", expectedCode, err)
+ }
+
+ return nil
+}
diff --git a/testdata/config.toml b/testdata/config.toml
index 1182ad4a..337c0dd6 100644
--- a/testdata/config.toml
+++ b/testdata/config.toml
@@ -1,32 +1,42 @@
[default]
-# This URL is used by instances to send back status messages as they install
-# the github actions runner. Status messages can be seen by querying the
-# runner status in garm.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-callback_url = "https://garm.example.com/api/v1/callbacks/status"
-
-# This URL is used by instances to retrieve information they need to set themselves
-# up. Access to this URL is granted using the same JWT token used to send back
-# status updates. Once the instance transitions to "installed" or "failed" state,
-# access to both the status and metadata endpoints is disabled.
-# Note: If you're using a reverse proxy in front of your garm installation,
-# this URL needs to point to the address of the reverse proxy. Using TLS is
-# highly encouraged.
-metadata_url = "https://garm.example.com/api/v1/metadata"
-
-# This folder is defined here for future use. Right now, we create a SSH
-# public/private key-pair.
-config_dir = "/etc/garm"
+# This option enables GARM to manage webhooks for repositories and organizations. Set this
+# to false to disable the API routes that manage webhooks.
+#
+# When managing webhooks, the PAT you're using must have the necessary access to create/list/delete
+# webhooks for repositories or organizations.
+enable_webhook_management = true
+# DEPRECATED: Use the [logging] section to set this option.
# Uncomment this line if you'd like to log to a file instead of standard output.
# log_file = "/tmp/runner-manager.log"
+# DEPRECATED: Use the [logging] section to set this option.
# Enable streaming logs via web sockets. Use garm-cli debug-log.
enable_log_streamer = false
+# Enable the golang debug server. See the documentation in the "doc" folder for more information.
+debug_server = false
+
+
+[logging]
+# Uncomment this line if you'd like to log to a file instead of standard output.
+# log_file = "/tmp/runner-manager.log"
+
+# enable_log_streamer enables streaming the logs over websockets
+enable_log_streamer = true
+# log_format is the output format of the logs. GARM uses structured logging and can
+# output as "text" or "json"
+log_format = "text"
+# log_level is the logging level GARM will output. Available log levels are:
+# * debug
+# * info
+# * warn
+# * error
+log_level = "debug"
+# log_source will output information about the function that generated the log line.
+log_source = false
+
[metrics]
# Toggle metrics. If set to false, the API endpoint for metrics collection will
# be disabled.
@@ -72,51 +82,51 @@ time_to_live = "8760h"
certificate = ""
# The path on disk to the corresponding private key for the certificate.
key = ""
+ [apiserver.webui]
+ enable = true
[database]
# Turn on/off debugging for database queries.
debug = false
# Database backend to use. Currently supported backends are:
# * sqlite3
- # * mysql
backend = "sqlite3"
# the passphrase option is a temporary measure by which we encrypt the webhook
# secret that gets saved to the database, using AES256. In the future, secrets
# will be saved to something like Barbican or Vault, eliminating the need for
# this. This setting needs to be 32 characters in size.
passphrase = "shreotsinWadquidAitNefayctowUrph"
- [database.mysql]
- # If MySQL is used, these are the credentials and connection information used
- # to connect to the server instance.
- # database username
- username = ""
- # Database password
- password = ""
- # hostname to connect to
- hostname = ""
- # database name
- database = ""
[database.sqlite3]
# Path on disk to the sqlite3 database file.
db_file = "/etc/garm/garm.db"
-
+ # busy_timeout_seconds is an optional parameter that will set the
+ # sqlite3_busy_timeout to the specified value. This is useful when
+ # GARM may be under heavy load and the database is locked by some
+ # other go routine. The default value is 0.
+ busy_timeout_seconds = 5
# Currently, providers are defined statically in the config. This is due to the fact
# that we have not yet added support for storing secrets in something like Barbican
# or Vault. This will change in the future. However, for now, it's important to remember
# that once you create a pool using one of the providers defined here, the name of that
-# provider must not be changes, or the pool will no longer work. Make sure you remove any
+# provider must not be changed, or the pool will no longer work. Make sure you remove any
# pools before removing or changing a provider.
[[provider]]
- # An arbitrary string describing this provider.
- name = "lxd_local"
- # Provider type. Garm is designed to allow creating providers which are used to spin
- # up compute resources, which in turn will run the github runner software.
- # Currently, LXD is the only supprted provider, but more will be written in the future.
- provider_type = "lxd"
- # A short description of this provider. The name, description and provider types will
- # be included in the information returned by the API when listing available providers.
- description = "Local LXD installation"
+# An arbitrary string describing this provider.
+name = "lxd_local"
+# Provider type. Garm is designed to allow creating providers which are used to spin
+# up compute resources, which in turn will run the github runner software.
+# Currently, LXD is the only supported provider, but more will be written in the future.
+provider_type = "lxd"
+# A short description of this provider. The name, description and provider types will
+# be included in the information returned by the API when listing available providers.
+description = "Local LXD installation"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.lxd]
# the path to the unix socket that LXD is listening on. This works if garm and LXD
# are on the same system, and this option takes precedence over the "url" option,
@@ -135,7 +145,7 @@ time_to_live = "8760h"
# * virtual-machine (default)
# * container
#
- instance_type = "virtual-machine"
+ instance_type = "container"
# enable/disable secure boot. If the image you select for the pool does not have a
# signed bootloader, set this to false, otherwise your instances won't boot.
secure_boot = false
@@ -187,6 +197,12 @@ time_to_live = "8760h"
name = "openstack_external"
description = "external openstack provider"
provider_type = "external"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.external]
# config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
config_file = "/etc/garm/providers.d/openstack/keystonerc"
@@ -199,6 +215,12 @@ provider_type = "external"
name = "azure_external"
description = "external azure provider"
provider_type = "external"
+# DisableJITConfig explicitly disables JIT configuration and forces runner registration
+# tokens to be used. This may happen if a provider has not yet been updated to support
+# JIT configuration.
+#
+# Set this to true if your provider does not support JIT configuration.
+disable_jit_config = false
[provider.external]
# config file passed to the executable via GARM_PROVIDER_CONFIG_FILE environment variable
config_file = "/etc/garm/providers.d/azure/config.sh"
@@ -206,32 +228,3 @@ provider_type = "external"
# anything (bash, a binary, python, etc). See documentation in this repo on how to write an
# external provider.
provider_executable = "/etc/garm/providers.d/azure/garm-external-provider"
-
-# This is a list of credentials that you can define as part of the repository
-# or organization definitions. They are not saved inside the database, as there
-# is no Vault integration (yet). This will change in the future.
-# Credentials defined here can be listed using the API. Obviously, only the name
-# and descriptions are returned.
-[[github]]
- name = "gabriel"
- description = "github token or user gabriel"
- # This is a personal token with access to the repositories and organizations
- # you plan on adding to garm. The "workflow" option needs to be selected in order
- # to work with repositories, and the admin:org needs to be set if you plan on
- # adding an organization.
- oauth2_token = "super secret token"
- # base_url (optional) is the URL at which your GitHub Enterprise Server can be accessed.
- # If these credentials are for github.com, leave this setting blank
- base_url = "https://ghe.example.com"
- # api_base_url (optional) is the base URL where the GitHub Enterprise Server API can be accessed.
- # Leave this blank if these credentials are for github.com.
- api_base_url = "https://ghe.example.com"
- # upload_base_url (optional) is the base URL where the GitHub Enterprise Server upload API can be accessed.
- # Leave this blank if these credentials are for github.com, or if you don't have a separate URL
- # for the upload API.
- upload_base_url = "https://api.ghe.example.com"
- # ca_cert_bundle (optional) is the CA certificate bundle in PEM format that will be used by the github
- # client to talk to the API. This bundle will also be sent to all runners as bootstrap params.
- # Use this option if you're using a self signed certificate.
- # Leave this blank if you're using github.com or if your certificare is signed by a valid CA.
- ca_cert_bundle = "/etc/garm/ghe.crt"
diff --git a/testdata/db/v0.1.4/garm.db b/testdata/db/v0.1.4/garm.db
new file mode 100644
index 00000000..7308e31f
Binary files /dev/null and b/testdata/db/v0.1.4/garm.db differ
diff --git a/util/appdefaults/appdefaults.go b/util/appdefaults/appdefaults.go
index 70b779bc..cc53f794 100644
--- a/util/appdefaults/appdefaults.go
+++ b/util/appdefaults/appdefaults.go
@@ -1,3 +1,16 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
package appdefaults
import "time"
@@ -19,11 +32,6 @@ const (
// configuration file.
DefaultConfigFilePath = "/etc/garm/config.toml"
- // DefaultUser is the default username that should exist on the instances.
- DefaultUser = "runner"
- // DefaultUserShell is the shell for the default user.
- DefaultUserShell = "/bin/bash"
-
// DefaultPoolQueueSize is the default size for a pool queue.
DefaultPoolQueueSize = 10
@@ -32,17 +40,16 @@ const (
// uploadBaseURL is the default URL for guthub uploads.
GithubDefaultUploadBaseURL = "https://uploads.github.com/"
+
+ // metrics data update interval
+ DefaultMetricsUpdateInterval = 60 * time.Second
)
-var (
- // DefaultConfigDir is the default path on disk to the config dir. The config
- // file will probably be in the same folder, but it is not mandatory.
- DefaultConfigDir = "/etc/garm"
+var Version string
- // DefaultUserGroups are the groups the default user will be part of.
- DefaultUserGroups = []string{
- "sudo", "adm", "cdrom", "dialout",
- "dip", "video", "plugdev", "netdev",
- "docker", "lxd",
+func GetVersion() string {
+ if Version == "" {
+ Version = "v0.0.0-unknown"
}
-)
+ return Version
+}
diff --git a/util/exec/exec.go b/util/exec/exec.go
deleted file mode 100644
index 654b0955..00000000
--- a/util/exec/exec.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package exec
-
-import (
- "bytes"
- "context"
- "os/exec"
-
- "github.com/pkg/errors"
-)
-
-func Exec(ctx context.Context, providerBin string, stdinData []byte, environ []string) ([]byte, error) {
- stdout := &bytes.Buffer{}
- stderr := &bytes.Buffer{}
- c := exec.CommandContext(ctx, providerBin)
- c.Env = environ
- c.Stdin = bytes.NewBuffer(stdinData)
- c.Stdout = stdout
- c.Stderr = stderr
-
- if err := c.Run(); err != nil {
- return nil, errors.Wrapf(err, "provider binary failed with stdout: %s; stderr: %s", stdout.String(), stderr.String())
- }
-
- return stdout.Bytes(), nil
-}
diff --git a/util/exec/exec_nix.go b/util/exec/exec_nix.go
deleted file mode 100644
index 1525eca6..00000000
--- a/util/exec/exec_nix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package exec
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func IsExecutable(path string) bool {
- return unix.Access(path, unix.X_OK) == nil
-}
diff --git a/util/exec/exec_windows.go b/util/exec/exec_windows.go
deleted file mode 100644
index 0c17839c..00000000
--- a/util/exec/exec_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package exec
-
-import (
- "os"
- "strings"
-)
-
-func IsExecutable(path string) bool {
- pathExt := os.Getenv("PATHEXT")
- execList := strings.Split(pathExt, ";")
- for _, ext := range execList {
- if strings.HasSuffix(path, ext) {
- return true
- }
- }
-
- return false
-}
diff --git a/util/github/client.go b/util/github/client.go
new file mode 100644
index 00000000..b4ca32e5
--- /dev/null
+++ b/util/github/client.go
@@ -0,0 +1,628 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package github
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/google/go-github/v72/github"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/cache"
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+)
+
+type githubClient struct {
+ *github.ActionsService
+ org *github.OrganizationsService
+ repo *github.RepositoriesService
+ enterprise *github.EnterpriseService
+ rateLimit *github.RateLimitService
+
+ entity params.ForgeEntity
+ cli *github.Client
+}
+
+func (g *githubClient) ListEntityHooks(ctx context.Context, opts *github.ListOptions) (ret []*github.Hook, response *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListHooks", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListHooks", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.repo.ListHooks(ctx, g.entity.Owner, g.entity.Name, opts)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.org.ListHooks(ctx, g.entity.Owner, opts)
+ default:
+ return nil, nil, fmt.Errorf("invalid entity type: %s", g.entity.EntityType)
+ }
+ return ret, response, err
+}
+
+func (g *githubClient) GetEntityHook(ctx context.Context, id int64) (ret *github.Hook, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "GetHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "GetHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, _, err = g.repo.GetHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, _, err = g.org.GetHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) createGithubEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "CreateHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, _, err = g.repo.CreateHook(ctx, g.entity.Owner, g.entity.Name, hook)
+ case params.ForgeEntityTypeOrganization:
+ ret, _, err = g.org.CreateHook(ctx, g.entity.Owner, hook)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) CreateEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+ switch g.entity.Credentials.ForgeType {
+ case params.GithubEndpointType:
+ return g.createGithubEntityHook(ctx, hook)
+ case params.GiteaEndpointType:
+ return g.createGiteaEntityHook(ctx, hook)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+}
+
+func (g *githubClient) DeleteEntityHook(ctx context.Context, id int64) (ret *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "DeleteHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "DeleteHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, err = g.repo.DeleteHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, err = g.org.DeleteHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) PingEntityHook(ctx context.Context, id int64) (ret *github.Response, err error) {
+ metrics.GithubOperationCount.WithLabelValues(
+ "PingHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "PingHook", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, err = g.repo.PingHook(ctx, g.entity.Owner, g.entity.Name, id)
+ case params.ForgeEntityTypeOrganization:
+ ret, err = g.org.PingHook(ctx, g.entity.Owner, id)
+ default:
+ return nil, errors.New("invalid entity type")
+ }
+ return ret, err
+}
+
+func (g *githubClient) ListEntityRunners(ctx context.Context, opts *github.ListRunnersOptions) (*github.Runners, *github.Response, error) {
+ var ret *github.Runners
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListEntityRunners", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListEntityRunners", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.ListRunners(ctx, g.entity.Owner, g.entity.Name, opts)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.ListOrganizationRunners(ctx, g.entity.Owner, opts)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.ListRunners(ctx, g.entity.Owner, opts)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+
+ return ret, response, err
+}
+
+func (g *githubClient) ListEntityRunnerApplicationDownloads(ctx context.Context) ([]*github.RunnerApplicationDownload, *github.Response, error) {
+ var ret []*github.RunnerApplicationDownload
+ var response *github.Response
+ var err error
+
+ metrics.GithubOperationCount.WithLabelValues(
+ "ListEntityRunnerApplicationDownloads", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ defer func() {
+ if err != nil {
+ metrics.GithubOperationFailedCount.WithLabelValues(
+ "ListEntityRunnerApplicationDownloads", // label: operation
+ g.entity.LabelScope(), // label: scope
+ ).Inc()
+ }
+ }()
+
+ switch g.entity.EntityType {
+ case params.ForgeEntityTypeRepository:
+ ret, response, err = g.ListRunnerApplicationDownloads(ctx, g.entity.Owner, g.entity.Name)
+ case params.ForgeEntityTypeOrganization:
+ ret, response, err = g.ListOrganizationRunnerApplicationDownloads(ctx, g.entity.Owner)
+ case params.ForgeEntityTypeEnterprise:
+ ret, response, err = g.enterprise.ListRunnerApplicationDownloads(ctx, g.entity.Owner)
+ default:
+ return nil, nil, errors.New("invalid entity type")
+ }
+
+ return ret, response, err
+}
+
+func parseError(response *github.Response, err error) error {
+ var statusCode int
+ if response != nil {
+ statusCode = response.StatusCode
+ }
+
+ switch statusCode {
+ case http.StatusNotFound:
+ return runnerErrors.ErrNotFound
+ case http.StatusUnauthorized:
+ return runnerErrors.ErrUnauthorized
+ case http.StatusUnprocessableEntity:
+ return runnerErrors.ErrBadRequest
+ default:
+ if statusCode >= 100 && statusCode < 300 {
+ return nil
+ }
+ if err != nil {
+ errResp := &github.ErrorResponse{}
+ if errors.As(err, &errResp) && errResp.Response != nil {
+ switch errResp.Response.StatusCode {
+ case http.StatusNotFound:
+ return runnerErrors.ErrNotFound
+ case http.StatusUnauthorized:
+ return runnerErrors.ErrUnauthorized
+ case http.StatusUnprocessableEntity:
+ return runnerErrors.ErrBadRequest
+ default:
+ // ugly hack. Gitea returns 500 if we try to remove a runner that does not exist.
+ if strings.Contains(err.Error(), "does not exist") {
+ return runnerErrors.ErrNotFound
+ }
+ return err
+ }
+ }
+ return err
+ }
+ return errors.New("unknown error")
+ }
+}
+
+// RemoveEntityRunner removes the runner identified by runnerID from the
+// repository, organization or enterprise this client is scoped to. A
+// not-found/unauthorized condition is surfaced as the matching sentinel
+// error via parseError.
+func (g *githubClient) RemoveEntityRunner(ctx context.Context, runnerID int64) error {
+	metrics.GithubOperationCount.WithLabelValues(
+		"RemoveEntityRunner", // label: operation
+		g.entity.LabelScope(), // label: scope
+	).Inc()
+
+	var response *github.Response
+	var err error
+	defer func() {
+		if err == nil {
+			return
+		}
+		metrics.GithubOperationFailedCount.WithLabelValues(
+			"RemoveEntityRunner", // label: operation
+			g.entity.LabelScope(), // label: scope
+		).Inc()
+	}()
+
+	switch g.entity.EntityType {
+	case params.ForgeEntityTypeRepository:
+		response, err = g.RemoveRunner(ctx, g.entity.Owner, g.entity.Name, runnerID)
+	case params.ForgeEntityTypeOrganization:
+		response, err = g.RemoveOrganizationRunner(ctx, g.entity.Owner, runnerID)
+	case params.ForgeEntityTypeEnterprise:
+		response, err = g.enterprise.RemoveRunner(ctx, g.entity.Owner, runnerID)
+	default:
+		return errors.New("invalid entity type")
+	}
+
+	if parsedErr := parseError(response, err); parsedErr != nil {
+		return fmt.Errorf("error removing runner %d: %w", runnerID, parsedErr)
+	}
+	return nil
+}
+
+// CreateEntityRegistrationToken creates a runner registration token for the
+// repository, organization or enterprise this client is scoped to. The raw
+// API response is returned alongside the token and any error.
+func (g *githubClient) CreateEntityRegistrationToken(ctx context.Context) (*github.RegistrationToken, *github.Response, error) {
+	metrics.GithubOperationCount.WithLabelValues(
+		"CreateEntityRegistrationToken", // label: operation
+		g.entity.LabelScope(), // label: scope
+	).Inc()
+
+	var ret *github.RegistrationToken
+	var response *github.Response
+	var err error
+	defer func() {
+		if err == nil {
+			return
+		}
+		metrics.GithubOperationFailedCount.WithLabelValues(
+			"CreateEntityRegistrationToken", // label: operation
+			g.entity.LabelScope(), // label: scope
+		).Inc()
+	}()
+
+	switch g.entity.EntityType {
+	case params.ForgeEntityTypeRepository:
+		ret, response, err = g.CreateRegistrationToken(ctx, g.entity.Owner, g.entity.Name)
+	case params.ForgeEntityTypeOrganization:
+		ret, response, err = g.CreateOrganizationRegistrationToken(ctx, g.entity.Owner)
+	case params.ForgeEntityTypeEnterprise:
+		ret, response, err = g.enterprise.CreateRegistrationToken(ctx, g.entity.Owner)
+	default:
+		return nil, nil, errors.New("invalid entity type")
+	}
+
+	return ret, response, err
+}
+
+// getOrganizationRunnerGroupIDByName pages through the organization's runner
+// groups and returns the ID of the group named rgName, or a not-found error.
+//
+// Fix: use the nil-safe GetID accessor instead of dereferencing
+// runnerGroup.ID directly, which would panic on a group with a nil ID; also
+// guard against a nil ghResp before reading NextPage.
+func (g *githubClient) getOrganizationRunnerGroupIDByName(ctx context.Context, entity params.ForgeEntity, rgName string) (int64, error) {
+	opts := github.ListOrgRunnerGroupOptions{
+		ListOptions: github.ListOptions{
+			PerPage: 100,
+		},
+	}
+
+	for {
+		metrics.GithubOperationCount.WithLabelValues(
+			"ListOrganizationRunnerGroups", // label: operation
+			entity.LabelScope(), // label: scope
+		).Inc()
+		runnerGroups, ghResp, err := g.ListOrganizationRunnerGroups(ctx, entity.Owner, &opts)
+		if err != nil {
+			metrics.GithubOperationFailedCount.WithLabelValues(
+				"ListOrganizationRunnerGroups", // label: operation
+				entity.LabelScope(), // label: scope
+			).Inc()
+			if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+				return 0, fmt.Errorf("error fetching runners: %w", runnerErrors.ErrUnauthorized)
+			}
+			return 0, fmt.Errorf("error fetching runners: %w", err)
+		}
+		for _, runnerGroup := range runnerGroups.RunnerGroups {
+			if runnerGroup.Name != nil && runnerGroup.GetName() == rgName {
+				return runnerGroup.GetID(), nil
+			}
+		}
+		if ghResp == nil || ghResp.NextPage == 0 {
+			break
+		}
+		opts.Page = ghResp.NextPage
+	}
+	return 0, runnerErrors.NewNotFoundError("runner group %s not found", rgName)
+}
+
+// getEnterpriseRunnerGroupIDByName pages through the enterprise's runner
+// groups and returns the ID of the group named rgName, or a not-found error.
+//
+// Fixes: use the nil-safe GetID accessor instead of dereferencing
+// runnerGroup.ID (panics on a nil ID); guard a nil ghResp before reading
+// NextPage; include the group name in the not-found message, consistent
+// with the organization variant.
+func (g *githubClient) getEnterpriseRunnerGroupIDByName(ctx context.Context, entity params.ForgeEntity, rgName string) (int64, error) {
+	opts := github.ListEnterpriseRunnerGroupOptions{
+		ListOptions: github.ListOptions{
+			PerPage: 100,
+		},
+	}
+
+	for {
+		metrics.GithubOperationCount.WithLabelValues(
+			"ListRunnerGroups", // label: operation
+			entity.LabelScope(), // label: scope
+		).Inc()
+		runnerGroups, ghResp, err := g.enterprise.ListRunnerGroups(ctx, entity.Owner, &opts)
+		if err != nil {
+			metrics.GithubOperationFailedCount.WithLabelValues(
+				"ListRunnerGroups", // label: operation
+				entity.LabelScope(), // label: scope
+			).Inc()
+			if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+				return 0, fmt.Errorf("error fetching runners: %w", runnerErrors.ErrUnauthorized)
+			}
+			return 0, fmt.Errorf("error fetching runners: %w", err)
+		}
+		for _, runnerGroup := range runnerGroups.RunnerGroups {
+			if runnerGroup.Name != nil && runnerGroup.GetName() == rgName {
+				return runnerGroup.GetID(), nil
+			}
+		}
+		if ghResp == nil || ghResp.NextPage == 0 {
+			break
+		}
+		opts.Page = ghResp.NextPage
+	}
+	return 0, runnerErrors.NewNotFoundError("runner group %s not found", rgName)
+}
+
+// GetEntityRunnerGroupIDByName resolves a runner group name to its ID for
+// the entity this client is scoped to. Repositories do not have runner
+// groups, so the default group ID (1) is returned early for them, as well
+// as for an empty or "default" group name.
+//
+// Fix: the cache entry is now written only on a cache miss. Previously it
+// was re-written on every hit, which kept extending the entry's TTL and
+// defeated the one hour invalidation described below.
+func (g *githubClient) GetEntityRunnerGroupIDByName(ctx context.Context, runnerGroupName string) (int64, error) {
+	var rgID int64 = 1
+
+	if g.entity.EntityType == params.ForgeEntityTypeRepository {
+		// This is a repository. Runner groups are supported at the org and
+		// enterprise levels. Return the default runner group id, early.
+		return rgID, nil
+	}
+
+	var ok bool
+	var err error
+	// attempt to get the runner group ID from cache. Cache will invalidate after 1 hour.
+	if runnerGroupName != "" && !strings.EqualFold(runnerGroupName, "default") {
+		rgID, ok = cache.GetEntityRunnerGroup(g.entity.ID, runnerGroupName)
+		if !ok || rgID == 0 {
+			switch g.entity.EntityType {
+			case params.ForgeEntityTypeOrganization:
+				rgID, err = g.getOrganizationRunnerGroupIDByName(ctx, g.entity, runnerGroupName)
+			case params.ForgeEntityTypeEnterprise:
+				rgID, err = g.getEnterpriseRunnerGroupIDByName(ctx, g.entity, runnerGroupName)
+			}
+			if err != nil {
+				return 0, fmt.Errorf("getting runner group ID: %w", err)
+			}
+			// set cache only on a miss. Avoid fetching the same runner group
+			// more than once an hour.
+			cache.SetEntityRunnerGroup(g.entity.ID, runnerGroupName, rgID)
+		}
+	}
+	return rgID, nil
+}
+
+// GetEntityJITConfig generates a just-in-time runner configuration for the
+// entity this client is scoped to (repository, organization or enterprise).
+//
+// Parameters:
+//   - instance: the name the new runner registers under
+//   - pool: the pool the runner belongs to; its GitHubRunnerGroup is
+//     resolved to a runner group ID
+//   - labels: labels to attach to the runner
+//
+// On success it returns the decoded JIT config map and the runner object
+// registered with the forge. If decoding fails after the runner was
+// created, the runner is removed again on a best-effort basis.
+//
+// Fixes: add the previously missing default case (an unknown entity type
+// fell through with a nil ret and caused a nil dereference); surface
+// unauthorized responses as runnerErrors.ErrUnauthorized instead of a
+// duplicated identical branch; only log "failed to remove runner" when the
+// cleanup removal actually failed; nil-check ret/EncodedJITConfig before
+// dereferencing.
+func (g *githubClient) GetEntityJITConfig(ctx context.Context, instance string, pool params.Pool, labels []string) (jitConfigMap map[string]string, runner *github.Runner, err error) {
+	rgID, err := g.GetEntityRunnerGroupIDByName(ctx, pool.GitHubRunnerGroup)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to get runner group: %w", err)
+	}
+	slog.DebugContext(ctx, "using runner group", "group_name", pool.GitHubRunnerGroup, "runner_group_id", rgID)
+	req := github.GenerateJITConfigRequest{
+		Name:          instance,
+		RunnerGroupID: rgID,
+		Labels:        labels,
+		// nolint:golangci-lint,godox
+		// TODO(gabriel-samfira): Should we make this configurable?
+		WorkFolder: github.Ptr("_work"),
+	}
+
+	metrics.GithubOperationCount.WithLabelValues(
+		"GetEntityJITConfig", // label: operation
+		g.entity.LabelScope(), // label: scope
+	).Inc()
+
+	var ret *github.JITRunnerConfig
+	var response *github.Response
+
+	switch g.entity.EntityType {
+	case params.ForgeEntityTypeRepository:
+		ret, response, err = g.GenerateRepoJITConfig(ctx, g.entity.Owner, g.entity.Name, &req)
+	case params.ForgeEntityTypeOrganization:
+		ret, response, err = g.GenerateOrgJITConfig(ctx, g.entity.Owner, &req)
+	case params.ForgeEntityTypeEnterprise:
+		ret, response, err = g.enterprise.GenerateEnterpriseJITConfig(ctx, g.entity.Owner, &req)
+	default:
+		return nil, nil, errors.New("invalid entity type")
+	}
+	if err != nil {
+		metrics.GithubOperationFailedCount.WithLabelValues(
+			"GetEntityJITConfig", // label: operation
+			g.entity.LabelScope(), // label: scope
+		).Inc()
+		// Map unauthorized responses to the sentinel error, consistent with
+		// the runner group lookup helpers.
+		if response != nil && response.StatusCode == http.StatusUnauthorized {
+			return nil, nil, fmt.Errorf("failed to get JIT config: %w", runnerErrors.ErrUnauthorized)
+		}
+		return nil, nil, fmt.Errorf("failed to get JIT config: %w", err)
+	}
+
+	if ret == nil || ret.EncodedJITConfig == nil {
+		return nil, nil, errors.New("no JIT config returned")
+	}
+
+	// If anything below fails, remove the runner we just registered so we
+	// don't leak offline runner entries on the forge.
+	defer func(run *github.Runner) {
+		if err != nil && run != nil {
+			if innerErr := g.RemoveEntityRunner(ctx, run.GetID()); innerErr != nil {
+				slog.With(slog.Any("error", innerErr)).ErrorContext(
+					ctx, "failed to remove runner",
+					"runner_id", run.GetID(), string(g.entity.EntityType), g.entity.String())
+			}
+		}
+	}(ret.Runner)
+
+	decoded, err := base64.StdEncoding.DecodeString(*ret.EncodedJITConfig)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to decode JIT config: %w", err)
+	}
+
+	var jitConfig map[string]string
+	if err = json.Unmarshal(decoded, &jitConfig); err != nil {
+		return nil, nil, fmt.Errorf("failed to unmarshal JIT config: %w", err)
+	}
+
+	return jitConfig, ret.Runner, nil
+}
+
+// RateLimit returns the current rate limit information for the credentials
+// this client was configured with. API errors are normalized through
+// parseError so callers can match sentinel errors.
+func (g *githubClient) RateLimit(ctx context.Context) (*github.RateLimits, error) {
+	limits, resp, err := g.rateLimit.Get(ctx)
+	if err != nil {
+		metrics.GithubOperationFailedCount.WithLabelValues(
+			"GetRateLimit", // label: operation
+			g.entity.LabelScope(), // label: scope
+		).Inc()
+	}
+	if parsedErr := parseError(resp, err); parsedErr != nil {
+		return nil, fmt.Errorf("getting rate limit: %w", parsedErr)
+	}
+	return limits, nil
+}
+
+// GetEntity returns the forge entity (repository, organization or
+// enterprise) this client is scoped to.
+func (g *githubClient) GetEntity() params.ForgeEntity {
+	return g.entity
+}
+
+// GithubBaseURL returns the base API URL of the underlying forge client.
+func (g *githubClient) GithubBaseURL() *url.URL {
+	return g.cli.BaseURL
+}
+
+// NewRateLimitClient returns a minimal client that can only be used to
+// query the rate limit endpoints of the forge, using the supplied
+// credentials.
+func NewRateLimitClient(ctx context.Context, credentials params.ForgeCredentials) (common.RateLimitClient, error) {
+	httpClient, err := credentials.GetHTTPClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching http client: %w", err)
+	}
+
+	slog.DebugContext(
+		ctx, "creating rate limit client",
+		"base_url", credentials.APIBaseURL,
+		"upload_url", credentials.UploadBaseURL)
+
+	forgeCli, err := github.NewClient(httpClient).WithEnterpriseURLs(
+		credentials.APIBaseURL, credentials.UploadBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching github client: %w", err)
+	}
+
+	return &githubClient{
+		rateLimit: forgeCli.RateLimit,
+		cli:       forgeCli,
+	}, nil
+}
+
+// withGiteaURLs rewrites the client's base and upload URLs to point at the
+// Gitea API (<base>/api/v1/) instead of the GitHub defaults.
+//
+// Fix: BaseURL and UploadURL now receive separate *url.URL values. The
+// original assigned the same pointer to both, so a later mutation of one
+// would silently change the other.
+func withGiteaURLs(client *github.Client, apiBaseURL string) (*github.Client, error) {
+	if client == nil {
+		return nil, errors.New("client is nil")
+	}
+
+	if apiBaseURL == "" {
+		return nil, errors.New("invalid gitea URLs")
+	}
+
+	parsedBaseURL, err := url.ParseRequestURI(apiBaseURL)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing gitea base URL: %w", err)
+	}
+
+	// Normalize to a trailing slash, then append the API prefix if the URL
+	// does not already end in it.
+	if !strings.HasSuffix(parsedBaseURL.Path, "/") {
+		parsedBaseURL.Path += "/"
+	}
+	if !strings.HasSuffix(parsedBaseURL.Path, "/api/v1/") {
+		parsedBaseURL.Path += "api/v1/"
+	}
+
+	baseURL := *parsedBaseURL
+	uploadURL := *parsedBaseURL
+	client.BaseURL = &baseURL
+	client.UploadURL = &uploadURL
+
+	return client, nil
+}
+
+// Client returns a GithubClient scoped to the given entity, configured for
+// either GitHub or Gitea depending on the forge type of the entity's
+// credentials.
+func Client(ctx context.Context, entity params.ForgeEntity) (common.GithubClient, error) {
+	httpClient, err := entity.Credentials.GetHTTPClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching http client: %w", err)
+	}
+
+	slog.DebugContext(
+		ctx, "creating client for entity",
+		"entity", entity.String(), "base_url", entity.Credentials.APIBaseURL,
+		"upload_url", entity.Credentials.UploadBaseURL)
+
+	forgeCli := github.NewClient(httpClient)
+	switch entity.Credentials.ForgeType {
+	case params.GithubEndpointType:
+		forgeCli, err = forgeCli.WithEnterpriseURLs(entity.Credentials.APIBaseURL, entity.Credentials.UploadBaseURL)
+	case params.GiteaEndpointType:
+		forgeCli, err = withGiteaURLs(forgeCli, entity.Credentials.APIBaseURL)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("error fetching github client: %w", err)
+	}
+
+	return &githubClient{
+		ActionsService: forgeCli.Actions,
+		org:            forgeCli.Organizations,
+		repo:           forgeCli.Repositories,
+		enterprise:     forgeCli.Enterprise,
+		rateLimit:      forgeCli.RateLimit,
+		cli:            forgeCli,
+		entity:         entity,
+	}, nil
+}
diff --git a/util/github/gitea.go b/util/github/gitea.go
new file mode 100644
index 00000000..5d35190b
--- /dev/null
+++ b/util/github/gitea.go
@@ -0,0 +1,116 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package github
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-github/v72/github"
+
+ "github.com/cloudbase/garm/metrics"
+ "github.com/cloudbase/garm/params"
+)
+
+// createGiteaHookOptions is the request body for Gitea's webhook creation
+// API. It mirrors the subset of fields GARM needs when installing hooks.
+type createGiteaHookOptions struct {
+	Type                string            `json:"type"`
+	Config              map[string]string `json:"config"`
+	Events              []string          `json:"events"`
+	BranchFilter        string            `json:"branch_filter"`
+	Active              bool              `json:"active"`
+	AuthorizationHeader string            `json:"authorization_header"`
+}
+
+// createGiteaRepoHook creates a webhook on a Gitea repository, mapping the
+// relevant fields of hook onto Gitea's hook creation payload and returning
+// the hook object Gitea responds with.
+//
+// Fixes: drop the unused named return value and decode the response into a
+// fresh variable instead of clobbering the hook input parameter.
+func (g *githubClient) createGiteaRepoHook(ctx context.Context, owner, name string, hook *github.Hook) (*github.Hook, error) {
+	u := fmt.Sprintf("repos/%v/%v/hooks", owner, name)
+	createOpts := &createGiteaHookOptions{
+		Type:         "gitea",
+		Events:       hook.Events,
+		Active:       hook.GetActive(),
+		BranchFilter: "*",
+		Config: map[string]string{
+			"content_type": hook.GetConfig().GetContentType(),
+			"url":          hook.GetConfig().GetURL(),
+			"http_method":  "post",
+			"secret":       hook.GetConfig().GetSecret(),
+		},
+	}
+
+	req, err := g.cli.NewRequest(http.MethodPost, u, createOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	created := new(github.Hook)
+	if _, err := g.cli.Do(ctx, req, created); err != nil {
+		return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	return created, nil
+}
+
+// createGiteaOrgHook creates a webhook on a Gitea organization, mapping the
+// relevant fields of hook onto Gitea's hook creation payload and returning
+// the hook object Gitea responds with.
+//
+// Fixes: drop the unused named return value and decode the response into a
+// fresh variable instead of clobbering the hook input parameter.
+func (g *githubClient) createGiteaOrgHook(ctx context.Context, owner string, hook *github.Hook) (*github.Hook, error) {
+	u := fmt.Sprintf("orgs/%v/hooks", owner)
+	createOpts := &createGiteaHookOptions{
+		Type:         "gitea",
+		Events:       hook.Events,
+		Active:       hook.GetActive(),
+		BranchFilter: "*",
+		Config: map[string]string{
+			"content_type": hook.GetConfig().GetContentType(),
+			"url":          hook.GetConfig().GetURL(),
+			"http_method":  "post",
+			"secret":       hook.GetConfig().GetSecret(),
+		},
+	}
+
+	req, err := g.cli.NewRequest(http.MethodPost, u, createOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	created := new(github.Hook)
+	if _, err := g.cli.Do(ctx, req, created); err != nil {
+		return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	return created, nil
+}
+
+// createGiteaEntityHook creates a webhook for the Gitea entity this client
+// is scoped to. Only repositories and organizations are supported; Gitea
+// has no enterprise level.
+func (g *githubClient) createGiteaEntityHook(ctx context.Context, hook *github.Hook) (ret *github.Hook, err error) {
+	metrics.GithubOperationCount.WithLabelValues(
+		"CreateHook", // label: operation
+		g.entity.LabelScope(), // label: scope
+	).Inc()
+	defer func() {
+		if err == nil {
+			return
+		}
+		metrics.GithubOperationFailedCount.WithLabelValues(
+			"CreateHook", // label: operation
+			g.entity.LabelScope(), // label: scope
+		).Inc()
+	}()
+
+	switch g.entity.EntityType {
+	case params.ForgeEntityTypeRepository:
+		ret, err = g.createGiteaRepoHook(ctx, g.entity.Owner, g.entity.Name, hook)
+	case params.ForgeEntityTypeOrganization:
+		ret, err = g.createGiteaOrgHook(ctx, g.entity.Owner, hook)
+	default:
+		return nil, errors.New("invalid entity type")
+	}
+	return ret, err
+}
diff --git a/util/github/scalesets/client.go b/util/github/scalesets/client.go
new file mode 100644
index 00000000..6b4b1bab
--- /dev/null
+++ b/util/github/scalesets/client.go
@@ -0,0 +1,104 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/google/go-github/v72/github"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+ "github.com/cloudbase/garm/runner/common"
+)
+
+// NewClient wraps an existing GithubClient into a ScaleSetClient that can
+// talk to the runner scale set (actions service) endpoints.
+func NewClient(cli common.GithubClient) (*ScaleSetClient, error) {
+	return &ScaleSetClient{
+		ghCli:      cli,
+		httpClient: &http.Client{},
+	}, nil
+}
+
+// ScaleSetClient is a client for the actions service endpoints used by
+// runner scale sets. It relies on a GithubClient for forge credentials and
+// uses its own HTTP client for the actions service itself.
+type ScaleSetClient struct {
+	ghCli      common.GithubClient
+	httpClient *http.Client
+
+	// scale sets are apparently available through the same security
+	// context that a normal runner would use. We connect to the same
+	// API endpoint a runner would connect to, in order to fetch jobs.
+	// To do this, we use a runner registration token.
+	runnerRegistrationToken *github.RegistrationToken
+	// actionsServiceInfo holds the pipeline URL and the JWT token to
+	// access it. The pipeline URL is the base URL where we can access
+	// the scale set endpoints.
+	actionsServiceInfo *params.ActionsServiceAdminInfoResponse
+
+	// mux guards concurrent access (see SetGithubClient/GetGithubClient).
+	mux sync.Mutex
+}
+
+// SetGithubClient swaps the underlying GithubClient. Safe for concurrent
+// use.
+func (s *ScaleSetClient) SetGithubClient(cli common.GithubClient) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	s.ghCli = cli
+}
+
+// GetGithubClient returns the currently configured GithubClient, or an
+// error if none has been set. Safe for concurrent use.
+func (s *ScaleSetClient) GetGithubClient() (common.GithubClient, error) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	if s.ghCli == nil {
+		return nil, fmt.Errorf("github client is not set in scaleset client")
+	}
+	return s.ghCli, nil
+}
+
+func (s *ScaleSetClient) Do(req *http.Request) (*http.Response, error) {
+ if s.httpClient == nil {
+ return nil, fmt.Errorf("http client is not initialized")
+ }
+
+ resp, err := s.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to dispatch HTTP request: %w", err)
+ }
+
+ if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+ return resp, nil
+ }
+
+ var body []byte
+ if resp != nil {
+ defer resp.Body.Close()
+ body, err = io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read body: %w", err)
+ }
+ }
+
+ switch resp.StatusCode {
+ case 404:
+ return nil, runnerErrors.NewNotFoundError("resource %s not found: %q", req.URL.String(), string(body))
+ case 400:
+ return nil, runnerErrors.NewBadRequestError("bad request while calling %s: %q", req.URL.String(), string(body))
+ case 409:
+ return nil, runnerErrors.NewConflictError("conflict while calling %s: %q", req.URL.String(), string(body))
+ case 401, 403:
+ return nil, runnerErrors.ErrUnauthorized
+ default:
+ return nil, fmt.Errorf("request to %s failed with status code %d: %q", req.URL.String(), resp.StatusCode, string(body))
+ }
+}
diff --git a/util/github/scalesets/jobs.go b/util/github/scalesets/jobs.go
new file mode 100644
index 00000000..defc9506
--- /dev/null
+++ b/util/github/scalesets/jobs.go
@@ -0,0 +1,88 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/cloudbase/garm/params"
+)
+
+// acquireJobsResult is the wire format returned by the acquirejobs
+// endpoint: a count and the list of acquired job request IDs.
+type acquireJobsResult struct {
+	Count int     `json:"count"`
+	Value []int64 `json:"value"`
+}
+
+// AcquireJobs marks the given job request IDs as acquired by the scale
+// set, authenticating with the message queue access token. It returns the
+// IDs that were actually acquired.
+func (s *ScaleSetClient) AcquireJobs(ctx context.Context, runnerScaleSetID int, messageQueueAccessToken string, requestIDs []int64) ([]int64, error) {
+	acquirePath := fmt.Sprintf("%s/%d/acquirejobs?api-version=6.0-preview", scaleSetEndpoint, runnerScaleSetID)
+
+	payload, err := json.Marshal(requestIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := s.newActionsRequest(ctx, http.MethodPost, acquirePath, bytes.NewBuffer(payload))
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	// This endpoint authenticates with the message queue access token.
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken))
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	var result acquireJobsResult
+	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return result.Value, nil
+}
+
+// GetAcquirableJobs fetches the list of jobs that can be acquired by the
+// given runner scale set. A 204 (no content) response yields an empty job
+// list.
+//
+// Fix: prefix the path with scaleSetEndpoint. Every sibling call
+// (AcquireJobs, CreateMessageSession, the session URL helper) includes the
+// prefix; its absence here looks like an oversight — confirm against
+// newActionsRequest's base URL handling.
+func (s *ScaleSetClient) GetAcquirableJobs(ctx context.Context, runnerScaleSetID int) (params.AcquirableJobList, error) {
+	path := fmt.Sprintf("%s/%d/acquirablejobs", scaleSetEndpoint, runnerScaleSetID)
+
+	req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return params.AcquirableJobList{}, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.AcquirableJobList{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNoContent {
+		return params.AcquirableJobList{Count: 0, Jobs: []params.AcquirableJob{}}, nil
+	}
+
+	var acquirableJobList params.AcquirableJobList
+	if err := json.NewDecoder(resp.Body).Decode(&acquirableJobList); err != nil {
+		return params.AcquirableJobList{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return acquirableJobList, nil
+}
diff --git a/util/github/scalesets/message_sessions.go b/util/github/scalesets/message_sessions.go
new file mode 100644
index 00000000..8fafc2c4
--- /dev/null
+++ b/util/github/scalesets/message_sessions.go
@@ -0,0 +1,291 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "math/big"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+ garmUtil "github.com/cloudbase/garm/util"
+)
+
+const maxCapacityHeader = "X-ScaleSetMaxCapacity"
+
+// MessageSession represents a message queue session with the actions
+// service. A background goroutine (see loop) keeps the session token
+// refreshed until the session is closed.
+type MessageSession struct {
+	// ssCli is the scale set client used to issue session API calls.
+	ssCli *ScaleSetClient
+	// session holds the current session state; replaced on refresh.
+	session *params.RunnerScaleSetSession
+	// ctx scopes the background refresh loop.
+	ctx context.Context
+
+	// done signals the refresh loop to exit; closed exactly once.
+	done chan struct{}
+	// closed records that Close() already ran.
+	closed bool
+	// lastErr is the outcome of the most recent refresh attempt.
+	lastErr error
+
+	mux sync.Mutex
+}
+
+// Close stops the session refresh loop. It is safe to call multiple times;
+// only the first call has any effect. It always returns nil.
+func (m *MessageSession) Close() error {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+	if !m.closed {
+		m.closed = true
+		close(m.done)
+	}
+	return nil
+}
+
+// MessageQueueAccessToken returns the access token for the session message
+// queue.
+// NOTE(review): reads m.session without holding m.mux while Refresh may
+// replace it concurrently — confirm with the race detector.
+func (m *MessageSession) MessageQueueAccessToken() string {
+	return m.session.MessageQueueAccessToken
+}
+
+// LastError returns the error recorded by the most recent token refresh
+// attempt, or nil if it succeeded.
+// NOTE(review): lastErr is written by the refresh goroutine without
+// synchronization — confirm with the race detector.
+func (m *MessageSession) LastError() error {
+	return m.lastErr
+}
+
+// loop periodically refreshes the message session token until the session
+// is closed or its context is canceled. It runs on its own goroutine,
+// started by CreateMessageSession, and closes the session on exit.
+func (m *MessageSession) loop() {
+	slog.DebugContext(m.ctx, "starting message session refresh loop", "session_id", m.session.SessionID.String())
+	timer := time.NewTicker(1 * time.Minute)
+	defer timer.Stop()
+	defer m.Close()
+
+	// NOTE(review): m.closed is read here without holding m.mux; Close()
+	// writes it under the lock. Likely benign (the done channel also guards
+	// exit), but confirm with the race detector.
+	if m.closed {
+		slog.DebugContext(m.ctx, "message session refresh loop closed")
+		return
+	}
+	for {
+		select {
+		case <-m.ctx.Done():
+			slog.DebugContext(m.ctx, "message session refresh loop context done")
+			return
+		case <-m.done:
+			slog.DebugContext(m.ctx, "message session refresh loop done")
+			return
+		case <-timer.C:
+			if err := m.maybeRefreshToken(m.ctx); err != nil {
+				// We endlessly retry. If it's a transient error, it should eventually
+				// work, if it's credentials issues, users can update them.
+				slog.With(slog.Any("error", err)).ErrorContext(m.ctx, "failed to refresh message queue token")
+				m.lastErr = err
+				continue
+			}
+			m.lastErr = nil
+		}
+	}
+}
+
+// SessionsRelativeURL returns the relative path identifying this session
+// within the scale set API, or an error if the session state is missing.
+func (m *MessageSession) SessionsRelativeURL() (string, error) {
+	if m.session == nil {
+		return "", fmt.Errorf("session is nil")
+	}
+	if m.session.RunnerScaleSet == nil {
+		return "", fmt.Errorf("runner scale set is nil")
+	}
+	return fmt.Sprintf("%s/%d/sessions/%s", scaleSetEndpoint, m.session.RunnerScaleSet.ID, m.session.SessionID.String()), nil
+}
+
+// Refresh renews the message session token by issuing a PATCH against the
+// session URL and swapping in the refreshed session state on success.
+//
+// Fix: the error messages previously said "delete" ("failed to create
+// message delete request" / "failed to delete message session") — a
+// copy-paste slip from the delete path; this is a refresh.
+func (m *MessageSession) Refresh(ctx context.Context) error {
+	slog.DebugContext(ctx, "refreshing message session token", "session_id", m.session.SessionID.String())
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	relPath, err := m.SessionsRelativeURL()
+	if err != nil {
+		return fmt.Errorf("failed to get session URL: %w", err)
+	}
+	req, err := m.ssCli.newActionsRequest(ctx, http.MethodPatch, relPath, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create session refresh request: %w", err)
+	}
+	resp, err := m.ssCli.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to refresh message session: %w", err)
+	}
+	defer resp.Body.Close()
+
+	var refreshedSession params.RunnerScaleSetSession
+	if err := json.NewDecoder(resp.Body).Decode(&refreshedSession); err != nil {
+		return fmt.Errorf("failed to decode response: %w", err)
+	}
+	slog.DebugContext(ctx, "refreshed message session token")
+	m.session = &refreshedSession
+	return nil
+}
+
+// maybeRefreshToken refreshes the session token if it expires within the
+// next 10 minutes plus up to 30 seconds of jitter (the jitter avoids many
+// sessions refreshing at the same instant).
+func (m *MessageSession) maybeRefreshToken(ctx context.Context) error {
+	if m.session == nil {
+		return fmt.Errorf("session is nil")
+	}
+
+	// NOTE(review): ExiresAt is a typo ("ExpiresAt") in the params package;
+	// it cannot be fixed from here without changing that package.
+	expiresAt, err := m.session.ExiresAt()
+	if err != nil {
+		return fmt.Errorf("failed to get expires at: %w", err)
+	}
+	// add some jitter (30 second interval)
+	randInt, err := rand.Int(rand.Reader, big.NewInt(30))
+	if err != nil {
+		return fmt.Errorf("failed to get a random number")
+	}
+	expiresIn := time.Duration(randInt.Int64())*time.Second + 10*time.Minute
+	slog.DebugContext(ctx, "checking if message session token needs refresh", "expires_at", expiresAt)
+	if m.session.ExpiresIn(expiresIn) {
+		if err := m.Refresh(ctx); err != nil {
+			return fmt.Errorf("failed to refresh message queue token: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// GetMessage polls the session message queue and returns the next message
+// after lastMessageID. An HTTP 202 means the queue is empty, in which case
+// a zero-value message and a nil error are returned.
+func (m *MessageSession) GetMessage(ctx context.Context, lastMessageID int64, maxCapacity uint) (params.RunnerScaleSetMessage, error) {
+	queueURL, err := url.Parse(m.session.MessageQueueURL)
+	if err != nil {
+		return params.RunnerScaleSetMessage{}, err
+	}
+
+	if lastMessageID > 0 {
+		query := queueURL.Query()
+		query.Set("lastMessageId", strconv.FormatInt(lastMessageID, 10))
+		queueURL.RawQuery = query.Encode()
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, queueURL.String(), nil)
+	if err != nil {
+		return params.RunnerScaleSetMessage{}, fmt.Errorf("failed to create request: %w", err)
+	}
+	req.Header.Set("Accept", "application/json; api-version=6.0-preview")
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", m.session.MessageQueueAccessToken))
+	req.Header.Set(maxCapacityHeader, fmt.Sprintf("%d", maxCapacity))
+
+	resp, err := m.ssCli.Do(req)
+	if err != nil {
+		return params.RunnerScaleSetMessage{}, fmt.Errorf("request to %s failed: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusAccepted {
+		slog.DebugContext(ctx, "no messages available in queue")
+		return params.RunnerScaleSetMessage{}, nil
+	}
+
+	var msg params.RunnerScaleSetMessage
+	if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil {
+		return params.RunnerScaleSetMessage{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return msg, nil
+}
+
+// DeleteMessage acknowledges a message by deleting it from the session
+// message queue.
+func (m *MessageSession) DeleteMessage(ctx context.Context, messageID int64) error {
+	queueURL, err := url.Parse(m.session.MessageQueueURL)
+	if err != nil {
+		return err
+	}
+	queueURL.Path = fmt.Sprintf("%s/%d", queueURL.Path, messageID)
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, queueURL.String(), nil)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", m.session.MessageQueueAccessToken))
+
+	resp, err := m.ssCli.Do(req)
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CreateMessageSession creates a message session for the given scale set
+// and starts a background goroutine that keeps the session token
+// refreshed. The caller owns the returned session and must Close it when
+// no longer needed.
+func (s *ScaleSetClient) CreateMessageSession(ctx context.Context, runnerScaleSetID int, owner string) (*MessageSession, error) {
+	sessionPath := fmt.Sprintf("%s/%d/sessions", scaleSetEndpoint, runnerScaleSetID)
+
+	requestData, err := json.Marshal(params.RunnerScaleSetSession{OwnerName: owner})
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal session data: %w", err)
+	}
+
+	req, err := s.newActionsRequest(ctx, http.MethodPost, sessionPath, bytes.NewBuffer(requestData))
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to execute request to %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	var createdSession params.RunnerScaleSetSession
+	if err := json.NewDecoder(resp.Body).Decode(&createdSession); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	sessionCtx := garmUtil.WithSlogContext(
+		ctx,
+		slog.Any("session_id", createdSession.SessionID.String()))
+	sess := &MessageSession{
+		ssCli:   s,
+		session: &createdSession,
+		ctx:     sessionCtx,
+		done:    make(chan struct{}),
+	}
+	go sess.loop()
+
+	return sess, nil
+}
+
+// DeleteMessageSession deletes a message session from the actions service.
+// A session that is already gone (not found) is treated as success.
+//
+// Fix: ScaleSetClient.Do never returns a response alongside an error, so
+// the original unconditional `defer resp.Body.Close()` dereferenced a nil
+// resp on every error path (including the tolerated ErrNotFound case).
+func (s *ScaleSetClient) DeleteMessageSession(ctx context.Context, session *MessageSession) error {
+	path, err := session.SessionsRelativeURL()
+	if err != nil {
+		return fmt.Errorf("failed to delete session: %w", err)
+	}
+
+	req, err := s.newActionsRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create message delete request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		// The session is already gone; nothing left to delete.
+		if errors.Is(err, runnerErrors.ErrNotFound) {
+			return nil
+		}
+		return fmt.Errorf("failed to delete message session: %w", err)
+	}
+	defer resp.Body.Close()
+	return nil
+}
diff --git a/util/github/scalesets/runners.go b/util/github/scalesets/runners.go
new file mode 100644
index 00000000..79c321bc
--- /dev/null
+++ b/util/github/scalesets/runners.go
@@ -0,0 +1,154 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm/params"
+)
+
+// scaleSetJitRunnerConfig is the request payload sent to the
+// generatejitconfig endpoint when requesting a just-in-time runner config.
+type scaleSetJitRunnerConfig struct {
+	// Name is the name the ephemeral runner will register with.
+	Name string `json:"name"`
+	// WorkFolder is the runner work directory; GenerateJitRunnerConfig
+	// always sets this to "_work".
+	WorkFolder string `json:"workFolder"`
+}
+
+// GenerateJitRunnerConfig asks the scale set service to generate a
+// just-in-time runner configuration for a runner named runnerName in the
+// scale set identified by scaleSetID. The runner work folder is always
+// "_work".
+func (s *ScaleSetClient) GenerateJitRunnerConfig(ctx context.Context, runnerName string, scaleSetID int) (params.RunnerScaleSetJitRunnerConfig, error) {
+	runnerSettings := scaleSetJitRunnerConfig{
+		Name:       runnerName,
+		WorkFolder: "_work",
+	}
+
+	body, err := json.Marshal(runnerSettings)
+	if err != nil {
+		return params.RunnerScaleSetJitRunnerConfig{}, err
+	}
+
+	// NOTE(review): newActionsRequest below also calls ensureAdminInfo, so
+	// this early call looks redundant — confirm before removing it.
+	if err := s.ensureAdminInfo(ctx); err != nil {
+		return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to ensure admin info: %w", err)
+	}
+
+	jitConfigPath := fmt.Sprintf("%s/%d/generatejitconfig", scaleSetEndpoint, scaleSetID)
+	req, err := s.newActionsRequest(ctx, http.MethodPost, jitConfigPath, bytes.NewBuffer(body))
+	if err != nil {
+		return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	var runnerJitConfig params.RunnerScaleSetJitRunnerConfig
+	if err := json.NewDecoder(resp.Body).Decode(&runnerJitConfig); err != nil {
+		return params.RunnerScaleSetJitRunnerConfig{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return runnerJitConfig, nil
+}
+
+// GetRunner fetches a single runner (agent) by its numeric ID.
+func (s *ScaleSetClient) GetRunner(ctx context.Context, runnerID int64) (params.RunnerReference, error) {
+	var ref params.RunnerReference
+
+	req, err := s.newActionsRequest(ctx, http.MethodGet, fmt.Sprintf("%s/%d", runnerEndpoint, runnerID), nil)
+	if err != nil {
+		return ref, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return ref, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	if err := json.NewDecoder(resp.Body).Decode(&ref); err != nil {
+		return params.RunnerReference{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return ref, nil
+}
+
+// ListAllRunners returns every runner (agent) registered in the pool.
+func (s *ScaleSetClient) ListAllRunners(ctx context.Context) (params.RunnerReferenceList, error) {
+	var runners params.RunnerReferenceList
+
+	req, err := s.newActionsRequest(ctx, http.MethodGet, runnerEndpoint, nil)
+	if err != nil {
+		return runners, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return runners, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	if err := json.NewDecoder(resp.Body).Decode(&runners); err != nil {
+		return params.RunnerReferenceList{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return runners, nil
+}
+
+// GetRunnerByName looks up a runner (agent) by name. It returns ErrNotFound
+// when no runner matches, and a conflict error when more than one runner
+// shares the name.
+func (s *ScaleSetClient) GetRunnerByName(ctx context.Context, runnerName string) (params.RunnerReference, error) {
+	path := fmt.Sprintf("%s?agentName=%s", runnerEndpoint, runnerName)
+
+	req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return params.RunnerReference{}, fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.RunnerReference{}, fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	var runnerList params.RunnerReferenceList
+	if err := json.NewDecoder(resp.Body).Decode(&runnerList); err != nil {
+		return params.RunnerReference{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	if runnerList.Count == 0 {
+		return params.RunnerReference{}, fmt.Errorf("could not find runner with name %q: %w", runnerName, runnerErrors.ErrNotFound)
+	}
+
+	// The previous code wrongly returned "failed to decode response: %w"
+	// here, wrapping a nil error. Report the real problem instead, matching
+	// the style of GetRunnerGroupByName.
+	if runnerList.Count > 1 {
+		return params.RunnerReference{}, runnerErrors.NewConflictError("multiple runners exist with the same name (%s)", runnerName)
+	}
+
+	return runnerList.RunnerReferences[0], nil
+}
+
+// RemoveRunner deletes the runner (agent) with the given ID.
+func (s *ScaleSetClient) RemoveRunner(ctx context.Context, runnerID int64) error {
+	req, err := s.newActionsRequest(ctx, http.MethodDelete, fmt.Sprintf("%s/%d", runnerEndpoint, runnerID), nil)
+	if err != nil {
+		return fmt.Errorf("failed to construct request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return fmt.Errorf("request failed for %s: %w", req.URL.String(), err)
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
diff --git a/util/github/scalesets/scalesets.go b/util/github/scalesets/scalesets.go
new file mode 100644
index 00000000..2aae493a
--- /dev/null
+++ b/util/github/scalesets/scalesets.go
@@ -0,0 +1,209 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+
+	runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+	"github.com/cloudbase/garm/params"
+)
+
+const (
+	// runnerEndpoint is the relative API path for runner (agent) operations.
+	runnerEndpoint = "_apis/distributedtask/pools/0/agents"
+	// scaleSetEndpoint is the relative API path for runner scale set
+	// operations.
+	scaleSetEndpoint = "_apis/runtime/runnerscalesets"
+)
+
+const (
+	// HeaderActionsActivityID is the Actions service activity ID header.
+	// NOTE(review): presumably used to correlate requests with service side
+	// logs — confirm.
+	HeaderActionsActivityID = "ActivityId"
+	// HeaderGitHubRequestID is the GitHub request ID header.
+	HeaderGitHubRequestID = "X-GitHub-Request-Id"
+)
+
+// GetRunnerScaleSetByNameAndRunnerGroup fetches the runner scale set with
+// the given name inside the specified runner group. It returns a not found
+// error when no scale set matches.
+func (s *ScaleSetClient) GetRunnerScaleSetByNameAndRunnerGroup(ctx context.Context, runnerGroupID int, name string) (params.RunnerScaleSet, error) {
+	// Escape the user supplied name so characters like '&', '#' or spaces
+	// cannot mangle the query string.
+	path := fmt.Sprintf("%s?runnerGroupId=%d&name=%s", scaleSetEndpoint, runnerGroupID, url.QueryEscape(name))
+	req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return params.RunnerScaleSet{}, err
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.RunnerScaleSet{}, err
+	}
+	defer resp.Body.Close()
+
+	// Decode into a value (not a nil pointer) so a literal "null" body
+	// cannot leave the pointer nil and panic on the Count access below.
+	var runnerScaleSetList params.RunnerScaleSetsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil {
+		return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	if runnerScaleSetList.Count == 0 {
+		return params.RunnerScaleSet{}, runnerErrors.NewNotFoundError("runner scale set with name %s and runner group ID %d was not found", name, runnerGroupID)
+	}
+
+	// Runner scale sets must have a unique name. Attempting to create a
+	// runner scale set with the same name as an existing scale set will
+	// result in a Bad Request (400) error.
+	return runnerScaleSetList.RunnerScaleSets[0], nil
+}
+
+// GetRunnerScaleSetByID fetches a runner scale set by its numeric ID.
+func (s *ScaleSetClient) GetRunnerScaleSetByID(ctx context.Context, runnerScaleSetID int) (params.RunnerScaleSet, error) {
+	var scaleSet params.RunnerScaleSet
+
+	req, err := s.newActionsRequest(ctx, http.MethodGet, fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID), nil)
+	if err != nil {
+		return scaleSet, err
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return scaleSet, fmt.Errorf("failed to get runner scaleset with ID %d: %w", runnerScaleSetID, err)
+	}
+	defer resp.Body.Close()
+
+	if err := json.NewDecoder(resp.Body).Decode(&scaleSet); err != nil {
+		return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return scaleSet, nil
+}
+
+// ListRunnerScaleSets lists all runner scale sets in a github entity.
+func (s *ScaleSetClient) ListRunnerScaleSets(ctx context.Context) (*params.RunnerScaleSetsResponse, error) {
+	req, err := s.newActionsRequest(ctx, http.MethodGet, scaleSetEndpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Previously the raw request — Authorization header included — was
+	// dumped straight to stdout. Log it at debug level instead, with the
+	// bearer token redacted.
+	redacted := req.Clone(ctx)
+	redacted.Header.Set("Authorization", "[REDACTED]")
+	if data, err := httputil.DumpRequest(redacted, false); err == nil {
+		slog.DebugContext(ctx, "listing runner scale sets", "request", string(data))
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list runner scale sets: %w", err)
+	}
+	defer resp.Body.Close()
+
+	var runnerScaleSetList params.RunnerScaleSetsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &runnerScaleSetList, nil
+}
+
+// CreateRunnerScaleSet creates a new runner scale set in the target GitHub
+// entity and returns the created object as reported by the service.
+func (s *ScaleSetClient) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *params.RunnerScaleSet) (params.RunnerScaleSet, error) {
+	var created params.RunnerScaleSet
+
+	body, err := json.Marshal(runnerScaleSet)
+	if err != nil {
+		return created, err
+	}
+
+	req, err := s.newActionsRequest(ctx, http.MethodPost, scaleSetEndpoint, bytes.NewReader(body))
+	if err != nil {
+		return created, err
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return created, fmt.Errorf("failed to create runner scale set: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
+		return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return created, nil
+}
+
+// UpdateRunnerScaleSet patches an existing runner scale set and returns the
+// updated object as reported by the service.
+func (s *ScaleSetClient) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetID int, runnerScaleSet params.RunnerScaleSet) (params.RunnerScaleSet, error) {
+	var updated params.RunnerScaleSet
+
+	body, err := json.Marshal(runnerScaleSet)
+	if err != nil {
+		return updated, fmt.Errorf("failed to marshal request: %w", err)
+	}
+
+	req, err := s.newActionsRequest(ctx, http.MethodPatch, fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID), bytes.NewReader(body))
+	if err != nil {
+		return updated, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return updated, fmt.Errorf("failed to make request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if err := json.NewDecoder(resp.Body).Decode(&updated); err != nil {
+		return params.RunnerScaleSet{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+	return updated, nil
+}
+
+// DeleteRunnerScaleSet deletes the runner scale set with the given ID. Any
+// status other than 204 No Content is reported as an error.
+func (s *ScaleSetClient) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetID int) error {
+	path := fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetID)
+	req, err := s.newActionsRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return err
+	}
+
+	// Use the scale set client's Do() — as every other method in this
+	// package does — instead of a throw-away http.Client, which bypassed
+	// whatever error handling the client performs.
+	resp, err := s.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to delete runner scale set: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusNoContent {
+		return fmt.Errorf("failed to delete scale set with code %d", resp.StatusCode)
+	}
+
+	return nil
+}
+
+// GetRunnerGroupByName looks up a runner group by name. It returns a not
+// found error when no group matches and a conflict error when several
+// groups share the name.
+func (s *ScaleSetClient) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (params.RunnerGroup, error) {
+	// Escape the user supplied group name so special characters cannot
+	// corrupt the query string.
+	path := fmt.Sprintf("_apis/runtime/runnergroups/?groupName=%s", url.QueryEscape(runnerGroup))
+	req, err := s.newActionsRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return params.RunnerGroup{}, err
+	}
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.RunnerGroup{}, fmt.Errorf("failed to make request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	var runnerGroupList params.RunnerGroupList
+	if err := json.NewDecoder(resp.Body).Decode(&runnerGroupList); err != nil {
+		return params.RunnerGroup{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	if runnerGroupList.Count == 0 {
+		return params.RunnerGroup{}, runnerErrors.NewNotFoundError("runner group %s does not exist", runnerGroup)
+	}
+
+	if runnerGroupList.Count > 1 {
+		return params.RunnerGroup{}, runnerErrors.NewConflictError("multiple runner groups exist with the same name (%s)", runnerGroup)
+	}
+
+	return runnerGroupList.RunnerGroups[0], nil
+}
diff --git a/util/github/scalesets/token.go b/util/github/scalesets/token.go
new file mode 100644
index 00000000..1491b748
--- /dev/null
+++ b/util/github/scalesets/token.go
@@ -0,0 +1,105 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/cloudbase/garm/params"
+)
+
+// getActionServiceInfo exchanges the runner registration token for Actions
+// service admin info (token and pipeline URL). It dereferences
+// s.runnerRegistrationToken, so callers must set that first; ensureAdminInfo
+// does this while holding s.mux before calling here.
+func (s *ScaleSetClient) getActionServiceInfo(ctx context.Context) (params.ActionsServiceAdminInfoResponse, error) {
+	regPath := "/actions/runner-registration"
+	baseURL := s.ghCli.GithubBaseURL()
+	url, err := baseURL.Parse(regPath)
+	if err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to parse url: %w", err)
+	}
+
+	entity := s.ghCli.GetEntity()
+	body := params.ActionsServiceAdminInfoRequest{
+		URL:         entity.ForgeURL(),
+		RunnerEvent: "register",
+	}
+
+	// Disable HTML escaping so characters like '&' in the entity URL are not
+	// encoded as \u0026 in the JSON payload.
+	buf := &bytes.Buffer{}
+	enc := json.NewEncoder(buf)
+	enc.SetEscapeHTML(false)
+
+	if err := enc.Encode(body); err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, err
+	}
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url.String(), buf)
+	if err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	// This endpoint authenticates with the runner registration token
+	// (RemoteAuth scheme) rather than the Actions service bearer token.
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", fmt.Sprintf("RemoteAuth %s", *s.runnerRegistrationToken.Token))
+
+	resp, err := s.Do(req)
+	if err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to get actions service admin info: %w", err)
+	}
+	defer resp.Body.Close()
+
+	data, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to read response body: %w", err)
+	}
+	// Strip a possible UTF-8 BOM, which would otherwise trip up the JSON
+	// unmarshaler.
+	data = bytes.TrimPrefix(data, []byte("\xef\xbb\xbf"))
+
+	var info params.ActionsServiceAdminInfoResponse
+	if err := json.Unmarshal(data, &info); err != nil {
+		return params.ActionsServiceAdminInfoResponse{}, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return info, nil
+}
+
+// ensureAdminInfo makes sure both the runner registration token and the
+// Actions service admin info are present and not about to expire, refreshing
+// each when it expires within the next 2 minutes. State is mutated under
+// s.mux, making this safe for concurrent use.
+func (s *ScaleSetClient) ensureAdminInfo(ctx context.Context) error {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	// expiresAt stays the zero time when no token has been fetched yet,
+	// which forces a refresh below.
+	var expiresAt time.Time
+	if s.runnerRegistrationToken != nil {
+		expiresAt = s.runnerRegistrationToken.GetExpiresAt().Time
+	}
+
+	// Refresh 2 minutes early so the token cannot expire mid-request.
+	now := time.Now().UTC().Add(2 * time.Minute)
+	if now.After(expiresAt) || s.runnerRegistrationToken == nil {
+		token, _, err := s.ghCli.CreateEntityRegistrationToken(ctx)
+		if err != nil {
+			return fmt.Errorf("failed to fetch runner registration token: %w", err)
+		}
+		s.runnerRegistrationToken = token
+	}
+
+	// The admin info is obtained with the registration token set above.
+	// ExpiresIn presumably reports whether the info expires within the given
+	// window, matching the 2 minute skew used for the token.
+	if s.actionsServiceInfo == nil || s.actionsServiceInfo.ExpiresIn(2*time.Minute) {
+		info, err := s.getActionServiceInfo(ctx)
+		if err != nil {
+			return fmt.Errorf("failed to get action service info: %w", err)
+		}
+		s.actionsServiceInfo = &info
+	}
+
+	return nil
+}
diff --git a/util/github/scalesets/util.go b/util/github/scalesets/util.go
new file mode 100644
index 00000000..e8387e63
--- /dev/null
+++ b/util/github/scalesets/util.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package scalesets
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+)
+
+// newActionsRequest builds an HTTP request against the Actions service
+// pipeline URL. It refreshes the admin token if needed, merges query
+// parameters from uriPath into those already on the pipeline URL (existing
+// base parameters win), defaults api-version to "6.0-preview", and sets the
+// JSON content type and bearer authorization headers.
+func (s *ScaleSetClient) newActionsRequest(ctx context.Context, method, uriPath string, body io.Reader) (*http.Request, error) {
+	if err := s.ensureAdminInfo(ctx); err != nil {
+		return nil, fmt.Errorf("failed to update token: %w", err)
+	}
+
+	actionsURI, err := s.actionsServiceInfo.GetURL()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get pipeline URL: %w", err)
+	}
+
+	pathURI, err := url.Parse(uriPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse path: %w", err)
+	}
+	// Merge query params from the relative path into the base query; a key
+	// already set on the base URL is left untouched.
+	pathQuery := pathURI.Query()
+	baseQuery := actionsURI.Query()
+	for k, values := range pathQuery {
+		if baseQuery.Get(k) == "" {
+			for _, val := range values {
+				baseQuery.Add(k, val)
+			}
+		}
+	}
+	if baseQuery.Get("api-version") == "" {
+		baseQuery.Set("api-version", "6.0-preview")
+	}
+
+	actionsURI.Path = path.Join(actionsURI.Path, pathURI.Path)
+	actionsURI.RawQuery = baseQuery.Encode()
+
+	req, err := http.NewRequestWithContext(ctx, method, actionsURI.String(), body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", s.actionsServiceInfo.Token))
+
+	return req, nil
+}
diff --git a/util/logging.go b/util/logging.go
new file mode 100644
index 00000000..99c69da7
--- /dev/null
+++ b/util/logging.go
@@ -0,0 +1,82 @@
+// Copyright 2025 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "context"
+ "log/slog"
+)
+
+// slogContextKey is a private context key type, preventing collisions with
+// context keys defined by other packages.
+type slogContextKey string
+
+const (
+	// slogCtxFields is the context key under which WithSlogContext stores
+	// extra slog attributes.
+	slogCtxFields slogContextKey = "slog_ctx_fields"
+)
+
+var _ slog.Handler = &SlogMultiHandler{}
+
+// WithSlogContext returns a copy of ctx carrying the given slog attributes.
+// SlogMultiHandler.Handle appends these attributes to every record logged
+// with the returned context.
+func WithSlogContext(ctx context.Context, attrs ...slog.Attr) context.Context {
+	return context.WithValue(ctx, slogCtxFields, attrs)
+}
+
+// SlogMultiHandler is an slog.Handler that fans each record out to multiple
+// wrapped handlers, injecting any attributes stored in the context by
+// WithSlogContext.
+type SlogMultiHandler struct {
+	// Handlers receive every record passed to Handle.
+	Handlers []slog.Handler
+}
+
+// Enabled reports whether at least one wrapped handler would process a
+// record at the given level.
+func (m *SlogMultiHandler) Enabled(ctx context.Context, level slog.Level) bool {
+	enabled := false
+	for i := 0; i < len(m.Handlers) && !enabled; i++ {
+		enabled = m.Handlers[i].Enabled(ctx, level)
+	}
+	return enabled
+}
+
+// Handle clones the record, appends any attributes carried by the context
+// (set via WithSlogContext) and forwards the result to every wrapped
+// handler. All handlers are invoked even when one fails; the first error
+// encountered is returned.
+func (m *SlogMultiHandler) Handle(ctx context.Context, r slog.Record) error {
+	// Clone so the context-derived attributes do not leak into the caller's
+	// record.
+	record := r.Clone()
+	attrs, ok := ctx.Value(slogCtxFields).([]slog.Attr)
+	if ok {
+		for _, v := range attrs {
+			record.AddAttrs(v)
+		}
+	}
+
+	var firstErr error
+	for _, h := range m.Handlers {
+		if err := h.Handle(ctx, record); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+
+// WithAttrs returns a new multi-handler whose wrapped handlers each carry
+// the additional attributes.
+func (m *SlogMultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	wrapped := make([]slog.Handler, 0, len(m.Handlers))
+	for _, handler := range m.Handlers {
+		wrapped = append(wrapped, handler.WithAttrs(attrs))
+	}
+	return &SlogMultiHandler{
+		Handlers: wrapped,
+	}
+}
+
+// WithGroup returns a new multi-handler whose wrapped handlers each nest
+// subsequent attributes under the given group name.
+func (m *SlogMultiHandler) WithGroup(name string) slog.Handler {
+	wrapped := make([]slog.Handler, 0, len(m.Handlers))
+	for _, handler := range m.Handlers {
+		wrapped = append(wrapped, handler.WithGroup(name))
+	}
+	return &SlogMultiHandler{
+		Handlers: wrapped,
+	}
+}
diff --git a/util/util.go b/util/util.go
index eddbb1a1..dc92ce0e 100644
--- a/util/util.go
+++ b/util/util.go
@@ -15,483 +15,98 @@
package util
import (
- "bytes"
- "compress/gzip"
"context"
- "crypto/aes"
- "crypto/cipher"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "encoding/binary"
"fmt"
- "io"
- "math/big"
"net/http"
- "os"
- "path"
- "regexp"
- "strings"
- "unicode"
- "unicode/utf16"
+ "unicode/utf8"
- "github.com/cloudbase/garm/cloudconfig"
- "github.com/cloudbase/garm/config"
- runnerErrors "github.com/cloudbase/garm/errors"
- "github.com/cloudbase/garm/params"
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ commonParams "github.com/cloudbase/garm-provider-common/params"
"github.com/cloudbase/garm/runner/common"
- "github.com/cloudbase/garm/util/appdefaults"
-
- "github.com/google/go-github/v48/github"
- "github.com/google/uuid"
- gorillaHandlers "github.com/gorilla/handlers"
- "github.com/pkg/errors"
- "github.com/teris-io/shortid"
- "golang.org/x/crypto/bcrypt"
- "golang.org/x/oauth2"
- lumberjack "gopkg.in/natefinch/lumberjack.v2"
)
-const alphanumeric = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
-
-// From: https://www.alexedwards.net/blog/validation-snippets-for-go#email-validation
-var rxEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
-
-var (
- OSToOSTypeMap map[string]params.OSType = map[string]params.OSType{
- "almalinux": params.Linux,
- "alma": params.Linux,
- "alpine": params.Linux,
- "archlinux": params.Linux,
- "arch": params.Linux,
- "centos": params.Linux,
- "ubuntu": params.Linux,
- "rhel": params.Linux,
- "suse": params.Linux,
- "opensuse": params.Linux,
- "fedora": params.Linux,
- "debian": params.Linux,
- "flatcar": params.Linux,
- "gentoo": params.Linux,
- "rockylinux": params.Linux,
- "rocky": params.Linux,
- "windows": params.Windows,
+func FetchTools(ctx context.Context, cli common.GithubClient) ([]commonParams.RunnerApplicationDownload, error) {
+ tools, ghResp, err := cli.ListEntityRunnerApplicationDownloads(ctx)
+ if err != nil {
+ if ghResp != nil && ghResp.StatusCode == http.StatusUnauthorized {
+ return nil, fmt.Errorf("error fetching tools: %w", runnerErrors.ErrUnauthorized)
+ }
+ return nil, fmt.Errorf("error fetching runner tools: %w", err)
}
- githubArchMapping map[string]string = map[string]string{
- "x86_64": "x64",
- "amd64": "x64",
- "armv7l": "arm",
- "aarch64": "arm64",
- "x64": "x64",
- "arm": "arm",
- "arm64": "arm64",
+ ret := []commonParams.RunnerApplicationDownload{}
+ for _, tool := range tools {
+ if tool == nil {
+ continue
+ }
+ ret = append(ret, commonParams.RunnerApplicationDownload(*tool))
}
-
- githubOSTypeMap map[string]string = map[string]string{
- "linux": "linux",
- "windows": "win",
- }
-
- //
- githubOSTag = map[params.OSType]string{
- params.Linux: "Linux",
- params.Windows: "Windows",
- }
-)
-
-// ResolveToGithubArch returns the cpu architecture as it is defined in the GitHub
-// tools download list. We use it to find the proper tools for the OS/Arch combo we're
-// deploying.
-func ResolveToGithubArch(arch string) (string, error) {
- ghArch, ok := githubArchMapping[arch]
- if !ok {
- return "", runnerErrors.NewNotFoundError("arch %s is unknown", arch)
- }
-
- return ghArch, nil
+ return ret, nil
}
-// ResolveToGithubArch returns the OS type as it is defined in the GitHub
-// tools download list. We use it to find the proper tools for the OS/Arch combo we're
-// deploying.
-func ResolveToGithubOSType(osType string) (string, error) {
- ghOS, ok := githubOSTypeMap[osType]
- if !ok {
- return "", runnerErrors.NewNotFoundError("os %s is unknown", osType)
+func ASCIIEqualFold(s, t string) bool {
+ // Fast ASCII path for equal-length ASCII strings
+ if len(s) == len(t) && isASCII(s) && isASCII(t) {
+ for i := 0; i < len(s); i++ {
+ a, b := s[i], t[i]
+ if a != b {
+ if 'A' <= a && a <= 'Z' {
+ a = a + 'a' - 'A'
+ }
+ if 'A' <= b && b <= 'Z' {
+ b = b + 'a' - 'A'
+ }
+ if a != b {
+ return false
+ }
+ }
+ }
+ return true
}
- return ghOS, nil
-}
+ // UTF-8 path - handle different byte lengths correctly
+ i, j := 0, 0
+ for i < len(s) && j < len(t) {
+ sr, sizeS := utf8.DecodeRuneInString(s[i:])
+ tr, sizeT := utf8.DecodeRuneInString(t[j:])
-// ResolveToGithubTag returns the default OS tag that self hosted runners automatically
-// (and forcefully) adds to every runner that gets deployed. We need to keep track of those
-// tags internally as well.
-func ResolveToGithubTag(os params.OSType) (string, error) {
- ghOS, ok := githubOSTag[os]
- if !ok {
- return "", runnerErrors.NewNotFoundError("os %s is unknown", os)
+ // Handle invalid UTF-8 - they must be identical
+ if sr == utf8.RuneError || tr == utf8.RuneError {
+ // For invalid UTF-8, compare the raw bytes
+ if sr == utf8.RuneError && tr == utf8.RuneError {
+ if sizeS == sizeT && s[i:i+sizeS] == t[j:j+sizeT] {
+ i += sizeS
+ j += sizeT
+ continue
+ }
+ }
+ return false
+ }
+
+ if sr != tr {
+ // Apply ASCII case folding only
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+
+ i += sizeS
+ j += sizeT
}
-
- return ghOS, nil
+ return i == len(s) && j == len(t)
}
-// IsValidEmail returs a bool indicating if an email is valid
-func IsValidEmail(email string) bool {
- if len(email) > 254 || !rxEmail.MatchString(email) {
- return false
- }
- return true
-}
-
-func IsAlphanumeric(s string) bool {
- for _, r := range s {
- if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= 0x80 {
return false
}
}
return true
}
-
-// GetLoggingWriter returns a new io.Writer suitable for logging.
-func GetLoggingWriter(cfg *config.Config) (io.Writer, error) {
- var writer io.Writer = os.Stdout
- if cfg.Default.LogFile != "" {
- dirname := path.Dir(cfg.Default.LogFile)
- if _, err := os.Stat(dirname); err != nil {
- if !os.IsNotExist(err) {
- return nil, fmt.Errorf("failed to create log folder")
- }
- if err := os.MkdirAll(dirname, 0o711); err != nil {
- return nil, fmt.Errorf("failed to create log folder")
- }
- }
- writer = &lumberjack.Logger{
- Filename: cfg.Default.LogFile,
- MaxSize: 500, // megabytes
- MaxBackups: 3,
- MaxAge: 28, // days
- Compress: true, // disabled by default
- }
- }
- return writer, nil
-}
-
-func ConvertFileToBase64(file string) (string, error) {
- bytes, err := os.ReadFile(file)
- if err != nil {
- return "", errors.Wrap(err, "reading file")
- }
-
- return base64.StdEncoding.EncodeToString(bytes), nil
-}
-
-func OSToOSType(os string) (params.OSType, error) {
- osType, ok := OSToOSTypeMap[strings.ToLower(os)]
- if !ok {
- return params.Unknown, fmt.Errorf("no OS to OS type mapping for %s", os)
- }
- return osType, nil
-}
-
-func GithubClient(ctx context.Context, token string, credsDetails params.GithubCredentials) (common.GithubClient, common.GithubEnterpriseClient, error) {
- var roots *x509.CertPool
- if credsDetails.CABundle != nil && len(credsDetails.CABundle) > 0 {
- roots = x509.NewCertPool()
- ok := roots.AppendCertsFromPEM(credsDetails.CABundle)
- if !ok {
- return nil, nil, fmt.Errorf("failed to parse CA cert")
- }
- }
- httpTransport := &http.Transport{
- TLSClientConfig: &tls.Config{
- ClientCAs: roots,
- },
- }
- httpClient := &http.Client{Transport: httpTransport}
- ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
-
- ts := oauth2.StaticTokenSource(
- &oauth2.Token{AccessToken: token},
- )
- tc := oauth2.NewClient(ctx, ts)
-
- ghClient, err := github.NewEnterpriseClient(credsDetails.APIBaseURL, credsDetails.UploadBaseURL, tc)
- if err != nil {
- return nil, nil, errors.Wrap(err, "fetching github client")
- }
-
- return ghClient.Actions, ghClient.Enterprise, nil
-}
-
-func GetCloudConfig(bootstrapParams params.BootstrapInstance, tools github.RunnerApplicationDownload, runnerName string) (string, error) {
- if tools.Filename == nil {
- return "", fmt.Errorf("missing tools filename")
- }
-
- if tools.DownloadURL == nil {
- return "", fmt.Errorf("missing tools download URL")
- }
-
- var tempToken string
- if tools.TempDownloadToken != nil {
- tempToken = *tools.TempDownloadToken
- }
-
- installRunnerParams := cloudconfig.InstallRunnerParams{
- FileName: *tools.Filename,
- DownloadURL: *tools.DownloadURL,
- TempDownloadToken: tempToken,
- MetadataURL: bootstrapParams.MetadataURL,
- RunnerUsername: appdefaults.DefaultUser,
- RunnerGroup: appdefaults.DefaultUser,
- RepoURL: bootstrapParams.RepoURL,
- RunnerName: runnerName,
- RunnerLabels: strings.Join(bootstrapParams.Labels, ","),
- CallbackURL: bootstrapParams.CallbackURL,
- CallbackToken: bootstrapParams.InstanceToken,
- GitHubRunnerGroup: bootstrapParams.GitHubRunnerGroup,
- }
- if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 {
- installRunnerParams.CABundle = string(bootstrapParams.CACertBundle)
- }
-
- installScript, err := cloudconfig.InstallRunnerScript(installRunnerParams, bootstrapParams.OSType)
- if err != nil {
- return "", errors.Wrap(err, "generating script")
- }
-
- var asStr string
- switch bootstrapParams.OSType {
- case params.Linux:
- cloudCfg := cloudconfig.NewDefaultCloudInitConfig()
-
- if bootstrapParams.UserDataOptions.DisableUpdatesOnBoot {
- cloudCfg.PackageUpgrade = false
- cloudCfg.Packages = []string{}
- }
- for _, pkg := range bootstrapParams.UserDataOptions.ExtraPackages {
- cloudCfg.AddPackage(pkg)
- }
-
- cloudCfg.AddSSHKey(bootstrapParams.SSHKeys...)
- cloudCfg.AddFile(installScript, "/install_runner.sh", "root:root", "755")
- cloudCfg.AddRunCmd("/install_runner.sh")
- cloudCfg.AddRunCmd("rm -f /install_runner.sh")
- if bootstrapParams.CACertBundle != nil && len(bootstrapParams.CACertBundle) > 0 {
- if err := cloudCfg.AddCACert(bootstrapParams.CACertBundle); err != nil {
- return "", errors.Wrap(err, "adding CA cert bundle")
- }
- }
- var err error
- asStr, err = cloudCfg.Serialize()
- if err != nil {
- return "", errors.Wrap(err, "creating cloud config")
- }
- case params.Windows:
- asStr = string(installScript)
- default:
- return "", fmt.Errorf("unknown os type: %s", bootstrapParams.OSType)
- }
-
- return asStr, nil
-}
-
-func GetTools(osType params.OSType, osArch params.OSArch, tools []*github.RunnerApplicationDownload) (github.RunnerApplicationDownload, error) {
- // Validate image OS. Linux only for now.
- switch osType {
- case params.Linux:
- case params.Windows:
- default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS type: %s", osType)
- }
-
- switch osArch {
- case params.Amd64:
- case params.Arm:
- case params.Arm64:
- default:
- return github.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS arch: %s", osArch)
- }
-
- // Find tools for OS/Arch.
- for _, tool := range tools {
- if tool == nil {
- continue
- }
- if tool.OS == nil || tool.Architecture == nil {
- continue
- }
-
- ghArch, err := ResolveToGithubArch(string(osArch))
- if err != nil {
- continue
- }
-
- ghOS, err := ResolveToGithubOSType(string(osType))
- if err != nil {
- continue
- }
- if *tool.Architecture == ghArch && *tool.OS == ghOS {
- return *tool, nil
- }
- }
- return github.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, osArch)
-}
-
-// GetRandomString returns a secure random string
-func GetRandomString(n int) (string, error) {
- data := make([]byte, n)
- _, err := rand.Read(data)
- if err != nil {
- return "", errors.Wrap(err, "getting random data")
- }
- for i, b := range data {
- data[i] = alphanumeric[b%byte(len(alphanumeric))]
- }
-
- return string(data), nil
-}
-
-func Aes256EncodeString(target string, passphrase string) ([]byte, error) {
- if len(passphrase) != 32 {
- return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
- }
-
- toEncrypt := []byte(target)
- block, err := aes.NewCipher([]byte(passphrase))
- if err != nil {
- return nil, errors.Wrap(err, "creating cipher")
- }
-
- aesgcm, err := cipher.NewGCM(block)
- if err != nil {
- return nil, errors.Wrap(err, "creating new aead")
- }
-
- nonce := make([]byte, aesgcm.NonceSize())
- if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
- return nil, errors.Wrap(err, "creating nonce")
- }
-
- ciphertext := aesgcm.Seal(nonce, nonce, toEncrypt, nil)
- return ciphertext, nil
-}
-
-func Aes256DecodeString(target []byte, passphrase string) (string, error) {
- if len(passphrase) != 32 {
- return "", fmt.Errorf("invalid passphrase length (expected length 32 characters)")
- }
-
- block, err := aes.NewCipher([]byte(passphrase))
- if err != nil {
- return "", errors.Wrap(err, "creating cipher")
- }
-
- aesgcm, err := cipher.NewGCM(block)
- if err != nil {
- return "", errors.Wrap(err, "creating new aead")
- }
-
- nonceSize := aesgcm.NonceSize()
- if len(target) < nonceSize {
- return "", fmt.Errorf("failed to decrypt text")
- }
-
- nonce, ciphertext := target[:nonceSize], target[nonceSize:]
- plaintext, err := aesgcm.Open(nil, nonce, ciphertext, nil)
- if err != nil {
- return "", fmt.Errorf("failed to decrypt text")
- }
- return string(plaintext), nil
-}
-
-// PaswsordToBcrypt returns a bcrypt hash of the specified password using the default cost
-func PaswsordToBcrypt(password string) (string, error) {
- hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
- if err != nil {
- return "", fmt.Errorf("failed to hash password")
- }
- return string(hashedPassword), nil
-}
-
-func NewLoggingMiddleware(writer io.Writer) func(http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- return gorillaHandlers.CombinedLoggingHandler(writer, next)
- }
-}
-
-func SanitizeLogEntry(entry string) string {
- return strings.Replace(strings.Replace(entry, "\n", "", -1), "\r", "", -1)
-}
-
-func toBase62(uuid []byte) string {
- var i big.Int
- i.SetBytes(uuid[:])
- return i.Text(62)
-}
-
-func NewID() string {
- short, err := shortid.Generate()
- if err == nil {
- return toBase62([]byte(short))
- }
- newUUID := uuid.New()
- return toBase62(newUUID[:])
-}
-
-func UTF16FromString(s string) ([]uint16, error) {
- buf := make([]uint16, 0, len(s)*2+1)
- for _, r := range s {
- buf = utf16.AppendRune(buf, r)
- }
- return utf16.AppendRune(buf, '\x00'), nil
-}
-
-func UTF16ToString(s []uint16) string {
- for i, v := range s {
- if v == 0 {
- s = s[0:i]
- break
- }
- }
- return string(utf16.Decode(s))
-}
-
-func Uint16ToByteArray(u []uint16) []byte {
- ret := make([]byte, (len(u)-1)*2)
- for i := 0; i < len(u)-1; i++ {
- binary.LittleEndian.PutUint16(ret[i*2:], uint16(u[i]))
- }
- return ret
-}
-
-func UTF16EncodedByteArrayFromString(s string) ([]byte, error) {
- asUint16, err := UTF16FromString(s)
- if err != nil {
- return nil, fmt.Errorf("failed to encode to uint16: %w", err)
- }
- asBytes := Uint16ToByteArray(asUint16)
- return asBytes, nil
-}
-
-func CompressData(data []byte) ([]byte, error) {
- var b bytes.Buffer
- gz := gzip.NewWriter(&b)
-
- _, err := gz.Write(data)
- if err != nil {
- return nil, fmt.Errorf("failed to compress data: %w", err)
- }
-
- if err = gz.Flush(); err != nil {
- return nil, fmt.Errorf("failed to flush buffer: %w", err)
- }
-
- if err = gz.Close(); err != nil {
- return nil, fmt.Errorf("failed to close buffer: %w", err)
- }
-
- return b.Bytes(), nil
-}
diff --git a/util/util_test.go b/util/util_test.go
new file mode 100644
index 00000000..f04dab84
--- /dev/null
+++ b/util/util_test.go
@@ -0,0 +1,394 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "testing"
+)
+
+func TestASCIIEqualFold(t *testing.T) {
+ tests := []struct {
+ name string
+ s string
+ t string
+ expected bool
+ reason string
+ }{
+ // Basic ASCII case folding tests
+ {
+ name: "identical strings",
+ s: "hello",
+ t: "hello",
+ expected: true,
+ reason: "identical strings should match",
+ },
+ {
+ name: "simple case difference",
+ s: "Hello",
+ t: "hello",
+ expected: true,
+ reason: "ASCII case folding should match H/h",
+ },
+ {
+ name: "all uppercase vs lowercase",
+ s: "HELLO",
+ t: "hello",
+ expected: true,
+ reason: "ASCII case folding should match all cases",
+ },
+ {
+ name: "mixed case",
+ s: "HeLLo",
+ t: "hEllO",
+ expected: true,
+ reason: "mixed case should match after folding",
+ },
+
+ // Empty string tests
+ {
+ name: "both empty",
+ s: "",
+ t: "",
+ expected: true,
+ reason: "empty strings should match",
+ },
+ {
+ name: "one empty",
+ s: "hello",
+ t: "",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+ {
+ name: "other empty",
+ s: "",
+ t: "hello",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+
+ // Different content tests
+ {
+ name: "different strings same case",
+ s: "hello",
+ t: "world",
+ expected: false,
+ reason: "different content should not match",
+ },
+ {
+ name: "different strings different case",
+ s: "Hello",
+ t: "World",
+ expected: false,
+ reason: "different content should not match regardless of case",
+ },
+ {
+ name: "different length",
+ s: "hello",
+ t: "hello world",
+ expected: false,
+ reason: "different length strings should not match",
+ },
+
+ // ASCII non-alphabetic characters
+ {
+ name: "numbers and symbols",
+ s: "Hello123!@#",
+ t: "hello123!@#",
+ expected: true,
+ reason: "numbers and symbols should be preserved, only letters folded",
+ },
+ {
+ name: "different numbers",
+ s: "Hello123",
+ t: "Hello124",
+ expected: false,
+ reason: "different numbers should not match",
+ },
+ {
+ name: "different symbols",
+ s: "Hello!",
+ t: "Hello?",
+ expected: false,
+ reason: "different symbols should not match",
+ },
+
+ // URL-specific tests (CORS security focus)
+ {
+ name: "HTTP scheme case",
+ s: "HTTP://example.com",
+ t: "http://example.com",
+ expected: true,
+ reason: "HTTP scheme should be case-insensitive",
+ },
+ {
+ name: "HTTPS scheme case",
+ s: "HTTPS://EXAMPLE.COM",
+ t: "https://example.com",
+ expected: true,
+ reason: "HTTPS scheme and domain should be case-insensitive",
+ },
+ {
+ name: "complex URL case",
+ s: "HTTPS://API.EXAMPLE.COM:8080/PATH",
+ t: "https://api.example.com:8080/path",
+ expected: true,
+ reason: "entire URL should be case-insensitive for ASCII",
+ },
+ {
+ name: "subdomain case",
+ s: "https://API.SUB.EXAMPLE.COM",
+ t: "https://api.sub.example.com",
+ expected: true,
+ reason: "subdomains should be case-insensitive",
+ },
+
+ // Unicode security tests (homograph attack prevention)
+ {
+ name: "cyrillic homograph attack",
+ s: "https://еxample.com", // Cyrillic 'е' (U+0435)
+ t: "https://example.com", // Latin 'e' (U+0065)
+ expected: false,
+ reason: "should block Cyrillic homograph attack",
+ },
+ {
+ name: "mixed cyrillic attack",
+ s: "https://ехample.com", // Cyrillic 'е' and 'х'
+ t: "https://example.com", // Latin 'e' and 'x'
+ expected: false,
+ reason: "should block mixed Cyrillic homograph attack",
+ },
+ {
+ name: "cyrillic 'а' attack",
+ s: "https://exаmple.com", // Cyrillic 'а' (U+0430)
+ t: "https://example.com", // Latin 'a' (U+0061)
+ expected: false,
+ reason: "should block Cyrillic 'а' homograph attack",
+ },
+
+ // Unicode case folding security tests
+ {
+ name: "unicode case folding attack",
+ s: "https://CAFÉ.com", // Latin É (U+00C9)
+ t: "https://café.com", // Latin é (U+00E9)
+ expected: false,
+ reason: "should NOT perform Unicode case folding (security)",
+ },
+ {
+ name: "turkish i attack",
+ s: "https://İSTANBUL.com", // Turkish İ (U+0130)
+ t: "https://istanbul.com", // Latin i
+ expected: false,
+ reason: "should NOT perform Turkish case folding",
+ },
+ {
+ name: "german sharp s",
+ s: "https://GROß.com", // German ß (U+00DF)
+ t: "https://gross.com", // Expanded form
+ expected: false,
+ reason: "should NOT perform German ß expansion",
+ },
+
+ // Valid Unicode exact matches
+ {
+ name: "identical unicode",
+ s: "https://café.com",
+ t: "https://café.com",
+ expected: true,
+ reason: "identical Unicode strings should match",
+ },
+ {
+ name: "identical cyrillic",
+ s: "https://пример.com", // Russian
+ t: "https://пример.com", // Russian
+ expected: true,
+ reason: "identical Cyrillic strings should match",
+ },
+ {
+ name: "ascii part of unicode domain",
+ s: "HTTPS://café.COM", // ASCII parts should fold
+ t: "https://café.com",
+ expected: true,
+ reason: "ASCII parts should fold even in Unicode strings",
+ },
+
+ // Edge cases with UTF-8
+ {
+ name: "different UTF-8 byte length same rune count",
+ s: "Café", // é is 2 bytes
+ t: "Café", // é is 2 bytes (same)
+ expected: true,
+ reason: "same Unicode content should match",
+ },
+ {
+ name: "UTF-8 normalization difference",
+ s: "café\u0301", // é as e + combining acute (3 bytes for é part)
+ t: "café", // é as single character (2 bytes for é part)
+ expected: false,
+ reason: "different Unicode normalization should not match",
+ },
+ {
+ name: "CRITICAL: current implementation flaw",
+ s: "ABC" + string([]byte{0xC3, 0xA9}), // ABC + é (2 bytes) = 5 bytes
+ t: "abc" + string([]byte{0xC3, 0xA9}), // abc + é (2 bytes) = 5 bytes
+ expected: true,
+ reason: "should match after ASCII folding (this should pass with correct implementation)",
+ },
+ {
+ name: "invalid UTF-8 sequence",
+ s: "hello\xff", // Invalid UTF-8
+ t: "hello\xff", // Invalid UTF-8
+ expected: true,
+ reason: "identical invalid UTF-8 should match",
+ },
+ {
+ name: "different invalid UTF-8",
+ s: "hello\xff", // Invalid UTF-8
+ t: "hello\xfe", // Different invalid UTF-8
+ expected: false,
+ reason: "different invalid UTF-8 should not match",
+ },
+
+ // ASCII boundary tests
+ {
+ name: "ascii boundary characters",
+ s: "A@Z[`a{z", // Test boundaries around A-Z
+ t: "a@z[`A{Z",
+ expected: true,
+ reason: "only A-Z should be folded, not punctuation",
+ },
+ {
+ name: "digit boundaries",
+ s: "Test123ABC",
+ t: "test123abc",
+ expected: true,
+ reason: "digits should not be folded, only letters",
+ },
+
+ // Long string performance tests
+ {
+ name: "long ascii string",
+ s: "HTTP://" + repeatString("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 100) + ".COM",
+ t: "http://" + repeatString("abcdefghijklmnopqrstuvwxyz", 100) + ".com",
+ expected: true,
+ reason: "long ASCII strings should be handled efficiently",
+ },
+ {
+ name: "long unicode string",
+ s: repeatString("CAFÉ", 100),
+ t: repeatString("CAFÉ", 100), // Same case - should match
+ expected: true,
+ reason: "long identical Unicode strings should match",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ASCIIEqualFold(tt.s, tt.t)
+ if result != tt.expected {
+ t.Errorf("ASCIIEqualFold(%q, %q) = %v, expected %v\nReason: %s",
+ tt.s, tt.t, result, tt.expected, tt.reason)
+ }
+ })
+ }
+}
+
+// Helper function for generating long test strings
+func repeatString(s string, count int) string {
+ if count <= 0 {
+ return ""
+ }
+ result := make([]byte, 0, len(s)*count)
+ for i := 0; i < count; i++ {
+ result = append(result, s...)
+ }
+ return string(result)
+}
+
+// Benchmark tests for performance verification
+func BenchmarkASCIIEqualFold(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ s string
+ t string
+ }{
+ {
+ name: "short_ascii_match",
+ s: "HTTP://EXAMPLE.COM",
+ t: "http://example.com",
+ },
+ {
+ name: "short_ascii_nomatch",
+ s: "HTTP://EXAMPLE.COM",
+ t: "http://different.com",
+ },
+ {
+ name: "long_ascii_match",
+ s: "HTTP://" + repeatString("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 100) + ".COM",
+ t: "http://" + repeatString("abcdefghijklmnopqrstuvwxyz", 100) + ".com",
+ },
+ {
+ name: "unicode_nomatch",
+ s: "https://café.com",
+ t: "https://CAFÉ.com",
+ },
+ {
+ name: "unicode_exact_match",
+ s: "https://café.com",
+ t: "https://café.com",
+ },
+ }
+
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ ASCIIEqualFold(bm.s, bm.t)
+ }
+ })
+ }
+}
+
+// Fuzzing test to catch edge cases
+func FuzzASCIIEqualFold(f *testing.F) {
+ // Seed with interesting test cases
+ seeds := [][]string{
+ {"hello", "HELLO"},
+ {"", ""},
+ {"café", "CAFÉ"},
+ {"https://example.com", "HTTPS://EXAMPLE.COM"},
+ {"еxample", "example"}, // Cyrillic attack
+ {string([]byte{0xff}), string([]byte{0xfe})}, // Invalid UTF-8
+ }
+
+ for _, seed := range seeds {
+ f.Add(seed[0], seed[1])
+ }
+
+ f.Fuzz(func(t *testing.T, s1, s2 string) {
+ // Just ensure it doesn't panic and returns a boolean
+ result := ASCIIEqualFold(s1, s2)
+ _ = result // Use the result to prevent optimization
+
+ // Property: function should be symmetric
+ if ASCIIEqualFold(s1, s2) != ASCIIEqualFold(s2, s1) {
+ t.Errorf("ASCIIEqualFold is not symmetric: (%q, %q)", s1, s2)
+ }
+
+ // Property: identical strings should always match
+ if s1 == s2 && !ASCIIEqualFold(s1, s2) {
+ t.Errorf("identical strings should match: %q", s1)
+ }
+ })
+}
diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/filippo.io/edwards25519/LICENSE
similarity index 100%
rename from vendor/golang.org/x/term/LICENSE
rename to vendor/filippo.io/edwards25519/LICENSE
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md
new file mode 100644
index 00000000..24e2457d
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/README.md
@@ -0,0 +1,14 @@
+# filippo.io/edwards25519
+
+```
+import "filippo.io/edwards25519"
+```
+
+This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
+Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
+
+The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
+
+Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
+
+Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go
new file mode 100644
index 00000000..ab6aaebc
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/doc.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use this package, which is an extended version of
+// crypto/internal/edwards25519 from the standard library repackaged as
+// an importable module.
+package edwards25519
diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go
new file mode 100644
index 00000000..a744da2c
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/edwards25519.go
@@ -0,0 +1,427 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // Make the type not comparable (i.e. used with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the point at infinity.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go
new file mode 100644
index 00000000..d152d68f
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/extra.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/internal/edwards25519 package.
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap. Don't change the style without making
+ // sure it doesn't increase the inliner cost.
+ var e [4]field.Element
+ X, Y, Z, T = v.extendedCoordinates(&e)
+ return
+}
+
+func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
+ checkInitialized(v)
+ X = e[0].Set(&v.x)
+ Y = e[1].Set(&v.y)
+ Z = e[2].Set(&v.z)
+ T = e[3].Set(&v.t)
+ return
+}
+
+// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+//
+// If the coordinates are invalid or don't represent a valid point on the curve,
+// SetExtendedCoordinates returns nil and an error and the receiver is
+// unchanged. Otherwise, SetExtendedCoordinates returns v.
+func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
+ if !isOnCurve(X, Y, Z, T) {
+ return nil, errors.New("edwards25519: invalid point coordinates")
+ }
+ v.x.Set(X)
+ v.y.Set(Y)
+ v.z.Set(Z)
+ v.t.Set(T)
+ return v, nil
+}
+
+func isOnCurve(X, Y, Z, T *field.Element) bool {
+ var lhs, rhs field.Element
+ XX := new(field.Element).Square(X)
+ YY := new(field.Element).Square(Y)
+ ZZ := new(field.Element).Square(Z)
+ TT := new(field.Element).Square(T)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
+ // -X² + Y² = Z² + dT²
+ lhs.Subtract(YY, XX)
+ rhs.Multiply(d, TT).Add(&rhs, ZZ)
+ if lhs.Equal(&rhs) != 1 {
+ return false
+ }
+ // xy = T/Z
+ // XY/Z² = T/Z
+ // XY = TZ
+ lhs.Multiply(X, Y)
+ rhs.Multiply(T, Z)
+ return lhs.Equal(&rhs) == 1
+}
+
+// BytesMontgomery converts v to a point on the birationally-equivalent
+// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
+// according to RFC 7748.
+//
+// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
+// to the same value. If v is the identity point, BytesMontgomery returns 32
+// zero bytes, analogously to the X25519 function.
+//
+// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
+// while every valid edwards25519 point has a unique u-coordinate Montgomery
+// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
+// to any edwards25519 point, and every other X25519 input corresponds to two
+// edwards25519 points.
+func (v *Point) BytesMontgomery() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytesMontgomery(&buf)
+}
+
+func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ // RFC 7748, Section 4.1 provides the bilinear map to calculate the
+ // Montgomery u-coordinate
+ //
+ // u = (1 + y) / (1 - y)
+ //
+ // where y = Y / Z.
+
+ var y, recip, u field.Element
+
+ y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
+ recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
+ u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
+
+ return copyFieldElement(buf, &u)
+}
+
+// MultByCofactor sets v = 8 * p, and returns v.
+func (v *Point) MultByCofactor(p *Point) *Point {
+ checkInitialized(p)
+ result := projP1xP1{}
+ pp := (&projP2{}).FromP3(p)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ return v.fromP1xP1(&result)
+}
+
+// Given k > 0, set s = s**(2*i).
+func (s *Scalar) pow2k(k int) {
+ for i := 0; i < k; i++ {
+ s.Multiply(s, s)
+ }
+}
+
+// Invert sets s to the inverse of a nonzero scalar v, and returns s.
+//
+// If t is zero, Invert returns zero.
+func (s *Scalar) Invert(t *Scalar) *Scalar {
+ // Uses a hardcoded sliding window of width 4.
+ var table [8]Scalar
+ var tt Scalar
+ tt.Multiply(t, t)
+ table[0] = *t
+ for i := 0; i < 7; i++ {
+ table[i+1].Multiply(&table[i], &tt)
+ }
+ // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
+ // so t**k = t[k/2] for odd k
+
+ // To compute the sliding window digits, use the following Sage script:
+
+ // sage: import itertools
+ // sage: def sliding_window(w,k):
+ // ....: digits = []
+ // ....: while k > 0:
+ // ....: if k % 2 == 1:
+ // ....: kmod = k % (2**w)
+ // ....: digits.append(kmod)
+ // ....: k = k - kmod
+ // ....: else:
+ // ....: digits.append(0)
+ // ....: k = k // 2
+ // ....: return digits
+
+ // Now we can compute s roughly as follows:
+
+ // sage: s = 1
+ // sage: for coeff in reversed(sliding_window(4,l-2)):
+ // ....: s = s*s
+ // ....: if coeff > 0 :
+ // ....: s = s*t**coeff
+
+ // This works on one bit at a time, with many runs of zeros.
+ // The digits can be collapsed into [(count, coeff)] as follows:
+
+ // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
+
+ // Entries of the form (k, 0) turn into pow2k(k)
+ // Entries of the form (1, coeff) turn into a squaring and then a table lookup.
+ // We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
+
+ *s = table[1/2]
+ s.pow2k(127 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[5/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(5 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(9 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+
+ return s
+}
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called MultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Proceed as in the single-base case, but share doublings
+ // between each point in the multiscalar equation.
+
+ // Build lookup tables for each point
+ tables := make([]projLookupTable, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute signed radix-16 digits for each scalar
+ digits := make([][64]int8, len(scalars))
+ for i := range digits {
+ digits[i] = scalars[i].signedRadix16()
+ }
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][63])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ for i := 62; i >= 0; i-- {
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][i])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ }
+ return v
+}
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Generalize double-base NAF computation to arbitrary sizes.
+ // Here all the points are dynamic, so we only use the smaller
+ // tables.
+
+ // Build lookup tables for each point
+ tables := make([]nafLookupTable5, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute a NAF for each scalar
+ nafs := make([][256]int8, len(scalars))
+ for i := range nafs {
+ nafs[i] = scalars[i].nonAdjacentForm(5)
+ }
+
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ //
+ // Skip trying to find the first nonzero coefficent, because
+ // searching might be more work than a few extra doublings.
+ for i := 255; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ for j := range nafs {
+ if nafs[j][i] > 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, nafs[j][i])
+ tmp1.Add(v, multiple)
+ } else if nafs[j][i] < 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, -nafs[j][i])
+ tmp1.Sub(v, multiple)
+ }
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
similarity index 90%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
rename to vendor/filippo.io/edwards25519/field/fe.go
index ca841ad9..5518ef2b 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
+++ b/vendor/filippo.io/edwards25519/field/fe.go
@@ -8,6 +8,7 @@ package field
import (
"crypto/subtle"
"encoding/binary"
+ "errors"
"math/bits"
)
@@ -92,7 +93,7 @@ func (v *Element) Add(a, b *Element) *Element {
// Using the generic implementation here is actually faster than the
// assembly. Probably because the body of this function is so simple that
// the compiler can figure out better optimizations by inlining the carry
- // propagation. TODO
+ // propagation.
return v.carryPropagateGeneric()
}
@@ -186,14 +187,17 @@ func (v *Element) Set(a *Element) *Element {
return v
}
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
//
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
+ return nil, errors.New("edwards25519: invalid field element input size")
}
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
@@ -208,12 +212,12 @@ func (v *Element) SetBytes(x []byte) *Element {
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
// Note: not bytes 25:33, shift 4, to avoid overread.
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
v.l4 &= maskLow51Bits
- return v
+ return v, nil
}
// Bytes returns the canonical 32-byte little-endian encoding of v.
@@ -391,26 +395,26 @@ var sqrtM1 = &Element{1718705420411056, 234908883556509,
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
// r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
- uNeg := b.Negate(u)
+ uNeg := new(Element).Negate(u)
correctSignSqrt := check.Equal(u)
flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
- r.Absolute(r) // Choose the nonnegative square root.
+ r.Absolute(rr) // Choose the nonnegative square root.
return r, correctSignSqrt | flippedSignSqrt
}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
rename to vendor/filippo.io/edwards25519/field/fe_amd64.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
rename to vendor/filippo.io/edwards25519/field/fe_amd64.s
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
rename to vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
rename to vendor/filippo.io/edwards25519/field/fe_arm64.go
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
similarity index 97%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
rename to vendor/filippo.io/edwards25519/field/fe_arm64.s
index 5c91e458..3126a434 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
-// +build arm64,gc,!purego
#include "textflag.h"
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
similarity index 100%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
rename to vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
new file mode 100644
index 00000000..1ef503b9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+ }
+
+ // Split the 64 bytes into two elements, and extract the most significant
+ // bit of each, which is ignored by SetBytes.
+ lo, _ := new(Element).SetBytes(x[:32])
+ loMSB := uint64(x[31] >> 7)
+ hi, _ := new(Element).SetBytes(x[32:])
+ hiMSB := uint64(x[63] >> 7)
+
+ // The output we want is
+ //
+ // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+ //
+ // which applying the reduction identity comes out to
+ //
+ // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+ //
+ // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
+ // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value
+ // (hiMSB * 2 * 19²), so it fits in a uint64.
+
+ v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+ v.l1 = lo.l1 + hi.l1*2*19
+ v.l2 = lo.l2 + hi.l2*2*19
+ v.l3 = lo.l3 + hi.l3*2*19
+ v.l4 = lo.l4 + hi.l4*2*19
+
+ return v.carryPropagate(), nil
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
similarity index 96%
rename from vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
rename to vendor/filippo.io/edwards25519/field/fe_generic.go
index 2671217d..86f5fd95 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ b/vendor/filippo.io/edwards25519/field/fe_generic.go
@@ -156,7 +156,7 @@ func feMulGeneric(v, a, b *Element) {
rr4 := r4.lo&maskLow51Bits + c3
// Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as a Element. We therefore do one last carry chain,
+ // be passed around as an Element. We therefore do one last carry chain,
// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
@@ -246,7 +246,7 @@ func feSquareGeneric(v, a *Element) {
}
// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagateGeneric() *Element {
c0 := v.l0 >> 51
c1 := v.l1 >> 51
@@ -254,6 +254,8 @@ func (v *Element) carryPropagateGeneric() *Element {
c3 := v.l3 >> 51
c4 := v.l4 >> 51
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
v.l0 = v.l0&maskLow51Bits + c4*19
v.l1 = v.l1&maskLow51Bits + c0
v.l2 = v.l2&maskLow51Bits + c1
diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go
new file mode 100644
index 00000000..3fd16538
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go
new file mode 100644
index 00000000..2e5782b6
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+	// Wide add with carry chain: (carry x8, x7, x5, x3, x1) = arg1 + arg2
+	// over four 64-bit limbs, least-significant limb first.
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+	// Trial subtraction of the modulus m, whose limbs (low to high) are
+	// 0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000:
+	// (x15, x13, x11, x9) = sum - m, with borrow propagated through x16.
+	var x9 uint64
+	var x10 uint64
+	x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x11 uint64
+	var x12 uint64
+	x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+	var x13 uint64
+	var x14 uint64
+	x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+	var x15 uint64
+	var x16 uint64
+	x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+	// Final borrow x18 also consumes the add's carry-out x8; a set borrow
+	// means sum < m, in which case the unreduced sum must be kept.
+	var x18 uint64
+	_, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+	// Constant-time selection between the reduced and unreduced sum.
+	var x19 uint64
+	fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+	var x20 uint64
+	fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+	var x21 uint64
+	fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+	var x22 uint64
+	fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+	out1[0] = x19
+	out1[1] = x20
+	out1[2] = x21
+	out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+	// Wide subtract with borrow chain: (x7, x5, x3, x1) = arg1 - arg2,
+	// x8 holds the final borrow-out.
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+	// x9 becomes an all-ones mask when the subtraction borrowed
+	// (arg1 < arg2), and zero otherwise.
+	var x9 uint64
+	fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+	// Conditionally add the modulus m back: the mask selects either m's
+	// limbs or zero, bringing the result into [0, m) in constant time.
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+	var x12 uint64
+	var x13 uint64
+	x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+	var x16 uint64
+	x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+	out1[0] = x10
+	out1[1] = x12
+	out1[2] = x14
+	out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+	// Negation as 0 - arg1 with a borrow chain, same shape as fiatScalarSub.
+	var x1 uint64
+	var x2 uint64
+	x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+	var x3 uint64
+	var x4 uint64
+	x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+	var x5 uint64
+	var x6 uint64
+	x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+	var x7 uint64
+	var x8 uint64
+	x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+	// x9 is an all-ones mask when the subtraction borrowed (arg1 != 0).
+	var x9 uint64
+	fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+	// Conditionally add the modulus m back so the result lands in [0, m);
+	// for arg1 == 0 the mask is zero and the output stays zero.
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+	var x12 uint64
+	var x13 uint64
+	x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+	var x16 uint64
+	x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+	out1[0] = x10
+	out1[1] = x12
+	out1[2] = x14
+	out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+	// OR the four limbs together: the result is zero iff every limb is zero.
+	combined := arg1[0]
+	combined |= arg1[1]
+	combined |= arg1[2]
+	combined |= arg1[3]
+	*out1 = combined
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+	// Montgomery reduction (REDC), one round per limb of arg1. Each round
+	// computes q = limb * 0xd2b51da312547e1b (presumably m' = -m⁻¹ mod 2⁶⁴;
+	// the discarded low word of the "Add64(limb, q*m_low)" below cancels by
+	// construction), adds q*m, and shifts right by one word.
+	x1 := arg1[0]
+	var x2 uint64
+	_, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+	// q*m, with m limbs (high to low in these multiplies):
+	// 0x1000000000000000, 0x14def9dea2f79cd6, 0x5812631a5cf5d3ed.
+	var x4 uint64
+	var x5 uint64
+	x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+	var x6 uint64
+	var x7 uint64
+	x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+	var x8 uint64
+	var x9 uint64
+	x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+	var x10 uint64
+	var x11 uint64
+	x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+	// The low word cancels; only the carry x13 is kept.
+	var x13 uint64
+	_, x13 = bits.Add64(x1, x8, uint64(0x0))
+	var x14 uint64
+	var x15 uint64
+	x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+	// Round 2: absorb arg1[1] and reduce again.
+	var x16 uint64
+	var x17 uint64
+	x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+	var x18 uint64
+	_, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+	var x20 uint64
+	var x21 uint64
+	x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+	var x22 uint64
+	var x23 uint64
+	x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+	var x24 uint64
+	var x25 uint64
+	x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+	var x26 uint64
+	var x27 uint64
+	x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+	var x29 uint64
+	_, x29 = bits.Add64(x16, x24, uint64(0x0))
+	var x30 uint64
+	var x31 uint64
+	x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+	var x32 uint64
+	var x33 uint64
+	x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+	var x34 uint64
+	var x35 uint64
+	x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+	// Round 3: absorb arg1[2] and reduce.
+	var x36 uint64
+	var x37 uint64
+	x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+	var x38 uint64
+	var x39 uint64
+	x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+	var x40 uint64
+	var x41 uint64
+	x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+	var x42 uint64
+	_, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+	var x44 uint64
+	var x45 uint64
+	x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+	var x46 uint64
+	var x47 uint64
+	x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+	var x48 uint64
+	var x49 uint64
+	x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+	var x50 uint64
+	var x51 uint64
+	x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+	var x53 uint64
+	_, x53 = bits.Add64(x36, x48, uint64(0x0))
+	var x54 uint64
+	var x55 uint64
+	x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+	var x56 uint64
+	var x57 uint64
+	x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+	var x58 uint64
+	var x59 uint64
+	x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+	// Round 4: absorb arg1[3] and reduce.
+	var x60 uint64
+	var x61 uint64
+	x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+	var x62 uint64
+	var x63 uint64
+	x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+	var x64 uint64
+	var x65 uint64
+	x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+	var x66 uint64
+	_, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+	var x68 uint64
+	var x69 uint64
+	x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+	var x70 uint64
+	var x71 uint64
+	x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+	var x72 uint64
+	var x73 uint64
+	x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+	var x74 uint64
+	var x75 uint64
+	x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+	var x77 uint64
+	_, x77 = bits.Add64(x60, x72, uint64(0x0))
+	var x78 uint64
+	var x79 uint64
+	x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+	var x80 uint64
+	var x81 uint64
+	x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+	var x82 uint64
+	var x83 uint64
+	x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+	x84 := (uint64(fiatScalarUint1(x83)) + x69)
+	// Final conditional subtraction of m brings the result into [0, m).
+	var x85 uint64
+	var x86 uint64
+	x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x87 uint64
+	var x88 uint64
+	x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+	var x89 uint64
+	var x90 uint64
+	x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+	var x91 uint64
+	var x92 uint64
+	x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+	var x94 uint64
+	_, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+	// Keep the unreduced value when the subtraction borrowed (value < m).
+	var x95 uint64
+	fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+	var x96 uint64
+	fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+	var x97 uint64
+	fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+	var x98 uint64
+	fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+	out1[0] = x95
+	out1[1] = x96
+	out1[2] = x97
+	out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+	// Montgomery multiplication of arg1 by the constant with limbs
+	// 0xa40611e3449c0f01, 0xd00e1ba768859347, 0xceec73d217f5be65,
+	// 0x399411b7c309a3d (low to high; presumably R² mod m, so the result is
+	// arg1·R mod m per the postcondition above — confirm against the
+	// generator). One schoolbook-multiply stanza plus one REDC round per
+	// input limb, using m' = 0xd2b51da312547e1b as in fiatScalarFromMontgomery.
+	x1 := arg1[1]
+	x2 := arg1[2]
+	x3 := arg1[3]
+	x4 := arg1[0]
+	// arg1[0] times each constant limb (64x64 -> 128), then carry-chain sum.
+	var x5 uint64
+	var x6 uint64
+	x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+	var x7 uint64
+	var x8 uint64
+	x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+	var x9 uint64
+	var x10 uint64
+	x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+	var x11 uint64
+	var x12 uint64
+	x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+	var x13 uint64
+	var x14 uint64
+	x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+	var x15 uint64
+	var x16 uint64
+	x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+	var x17 uint64
+	var x18 uint64
+	x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+	// REDC round 1: q = x11 * m'; add q*m and drop the cancelled low word.
+	var x19 uint64
+	_, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+	var x21 uint64
+	var x22 uint64
+	x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+	var x23 uint64
+	var x24 uint64
+	x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+	var x25 uint64
+	var x26 uint64
+	x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+	var x27 uint64
+	var x28 uint64
+	x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+	var x30 uint64
+	_, x30 = bits.Add64(x11, x25, uint64(0x0))
+	var x31 uint64
+	var x32 uint64
+	x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+	var x33 uint64
+	var x34 uint64
+	x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+	var x35 uint64
+	var x36 uint64
+	x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+	// arg1[1] times each constant limb, then accumulate into the running sum.
+	var x37 uint64
+	var x38 uint64
+	x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+	var x39 uint64
+	var x40 uint64
+	x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+	var x41 uint64
+	var x42 uint64
+	x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+	var x43 uint64
+	var x44 uint64
+	x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+	var x45 uint64
+	var x46 uint64
+	x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+	var x47 uint64
+	var x48 uint64
+	x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+	var x49 uint64
+	var x50 uint64
+	x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+	var x51 uint64
+	var x52 uint64
+	x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+	var x53 uint64
+	var x54 uint64
+	x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+	var x55 uint64
+	var x56 uint64
+	x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+	var x57 uint64
+	var x58 uint64
+	x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+	// REDC round 2.
+	var x59 uint64
+	_, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+	var x61 uint64
+	var x62 uint64
+	x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+	var x63 uint64
+	var x64 uint64
+	x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+	var x65 uint64
+	var x66 uint64
+	x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+	var x67 uint64
+	var x68 uint64
+	x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+	var x70 uint64
+	_, x70 = bits.Add64(x51, x65, uint64(0x0))
+	var x71 uint64
+	var x72 uint64
+	x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+	var x73 uint64
+	var x74 uint64
+	x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+	var x75 uint64
+	var x76 uint64
+	x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+	// arg1[2] times each constant limb, then accumulate.
+	var x77 uint64
+	var x78 uint64
+	x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+	var x79 uint64
+	var x80 uint64
+	x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+	var x81 uint64
+	var x82 uint64
+	x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+	var x83 uint64
+	var x84 uint64
+	x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+	var x85 uint64
+	var x86 uint64
+	x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+	var x87 uint64
+	var x88 uint64
+	x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+	var x89 uint64
+	var x90 uint64
+	x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+	var x91 uint64
+	var x92 uint64
+	x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+	var x93 uint64
+	var x94 uint64
+	x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+	var x95 uint64
+	var x96 uint64
+	x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+	var x97 uint64
+	var x98 uint64
+	x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+	// REDC round 3.
+	var x99 uint64
+	_, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+	var x101 uint64
+	var x102 uint64
+	x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+	var x103 uint64
+	var x104 uint64
+	x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+	var x105 uint64
+	var x106 uint64
+	x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+	var x107 uint64
+	var x108 uint64
+	x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+	var x110 uint64
+	_, x110 = bits.Add64(x91, x105, uint64(0x0))
+	var x111 uint64
+	var x112 uint64
+	x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+	var x113 uint64
+	var x114 uint64
+	x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+	var x115 uint64
+	var x116 uint64
+	x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+	// arg1[3] times each constant limb, then accumulate.
+	var x117 uint64
+	var x118 uint64
+	x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+	var x119 uint64
+	var x120 uint64
+	x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+	var x121 uint64
+	var x122 uint64
+	x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+	var x123 uint64
+	var x124 uint64
+	x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+	var x125 uint64
+	var x126 uint64
+	x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+	var x127 uint64
+	var x128 uint64
+	x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+	var x129 uint64
+	var x130 uint64
+	x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+	var x131 uint64
+	var x132 uint64
+	x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+	var x133 uint64
+	var x134 uint64
+	x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+	var x135 uint64
+	var x136 uint64
+	x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+	var x137 uint64
+	var x138 uint64
+	x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+	// REDC round 4.
+	var x139 uint64
+	_, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+	var x141 uint64
+	var x142 uint64
+	x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+	var x143 uint64
+	var x144 uint64
+	x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+	var x145 uint64
+	var x146 uint64
+	x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+	var x147 uint64
+	var x148 uint64
+	x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+	var x150 uint64
+	_, x150 = bits.Add64(x131, x145, uint64(0x0))
+	var x151 uint64
+	var x152 uint64
+	x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+	var x153 uint64
+	var x154 uint64
+	x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+	var x155 uint64
+	var x156 uint64
+	x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+	x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+	// Final conditional subtraction of m brings the result into [0, m).
+	var x158 uint64
+	var x159 uint64
+	x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+	var x160 uint64
+	var x161 uint64
+	x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+	var x162 uint64
+	var x163 uint64
+	x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+	var x164 uint64
+	var x165 uint64
+	x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+	var x167 uint64
+	_, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+	// Keep the unreduced value when the subtraction borrowed (value < m).
+	var x168 uint64
+	fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+	var x169 uint64
+	fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+	var x170 uint64
+	fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+	var x171 uint64
+	fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+	out1[0] = x168
+	out1[1] = x169
+	out1[2] = x170
+	out1[3] = x171
+}
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+	// Serialize each 64-bit limb as eight little-endian bytes: byte k of
+	// the output is bits [8k, 8k+8) of the 256-bit value.
+	idx := 0
+	for _, word := range arg1 {
+		for b := 0; b < 8; b++ {
+			out1[idx] = uint8(word)
+			word >>= 8
+			idx++
+		}
+	}
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go
new file mode 100644
index 00000000..f7ca3cef
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+	basepointTablePrecomp.initOnce.Do(func() {
+		// Table i is built from the running point, which is doubled eight
+		// times between tables (so table i starts at 256^i * basepoint).
+		point := NewGeneratorPoint()
+		for i := range basepointTablePrecomp.table {
+			basepointTablePrecomp.table[i].FromP3(point)
+			for doubling := 0; doubling < 8; doubling++ {
+				point.Add(point, point)
+			}
+		}
+	})
+	return &basepointTablePrecomp.table
+}
+
+// basepointTablePrecomp holds the lazily-built basepoint tables; access them
+// only through basepointTable(), which performs the one-time initialization.
+var basepointTablePrecomp struct {
+	table    [32]affineLookupTable
+	initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+	basepointTable := basepointTable()
+
+	// Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+	// as described in the Ed25519 paper
+	//
+	// Group even and odd coefficients
+	// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+	// + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+	// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+	// + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+	//
+	// We use a lookup table for each i to get x_i*16^(2*i)*B
+	// and do four doublings to multiply by 16.
+	digits := x.signedRadix16()
+
+	// Reusable scratch values; every Add/Double below writes in place.
+	multiple := &affineCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+
+	// Accumulate the odd components first
+	v.Set(NewIdentityPoint())
+	for i := 1; i < 64; i += 2 {
+		basepointTable[i/2].SelectInto(multiple, digits[i])
+		tmp1.AddAffine(v, multiple)
+		v.fromP1xP1(tmp1)
+	}
+
+	// Multiply by 16
+	tmp2.FromP3(v) // tmp2 = v in P2 coords
+	tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+	tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+	tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+	tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+	tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+	v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+	// Accumulate the even components
+	for i := 0; i < 64; i += 2 {
+		basepointTable[i/2].SelectInto(multiple, digits[i])
+		tmp1.AddAffine(v, multiple)
+		v.fromP1xP1(tmp1)
+	}
+
+	return v
+}
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+	checkInitialized(q)
+
+	// Build the per-call table of small multiples of q (q is not modified).
+	var table projLookupTable
+	table.FromP3(q)
+
+	// Write x = sum(x_i * 16^i)
+	// so x*Q = sum( Q*x_i*16^i )
+	// = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+	// <------compute inside out---------
+	//
+	// We use the lookup table to get the x_i*Q values
+	// and do four doublings to compute 16*Q
+	digits := x.signedRadix16()
+
+	// Unwrap first loop iteration to save computing 16*identity
+	multiple := &projCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	table.SelectInto(multiple, digits[63])
+
+	v.Set(NewIdentityPoint())
+	tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+	// Horner evaluation: four doublings (times 16), then add the next digit.
+	for i := 62; i >= 0; i-- {
+		tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+		tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+		tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+		tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+		tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+		v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+		table.SelectInto(multiple, digits[i])
+		tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+	}
+	v.fromP1xP1(tmp1)
+	return v
+}
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+	precomp := &basepointNafTablePrecomp
+	precomp.initOnce.Do(func() {
+		// Build the generator's lookup table exactly once.
+		precomp.table.FromP3(NewGeneratorPoint())
+	})
+	return &precomp.table
+}
+
+// basepointNafTablePrecomp holds the lazily-built basepoint NAF table; access
+// it only through basepointNafTable(), which performs the one-time build.
+var basepointNafTablePrecomp struct {
+	table    nafLookupTable8
+	initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+	checkInitialized(A)
+
+	// Similarly to the single variable-base approach, we compute
+	// digits and use them with a lookup table. However, because
+	// we are allowed to do variable-time operations, we don't
+	// need constant-time lookups or constant-time digit
+	// computations.
+	//
+	// So we use a non-adjacent form of some width w instead of
+	// radix 16. This is like a binary representation (one digit
+	// for each binary place) but we allow the digits to grow in
+	// magnitude up to 2^{w-1} so that the nonzero digits are as
+	// sparse as possible. Intuitively, this "condenses" the
+	// "mass" of the scalar onto sparse coefficients (meaning
+	// fewer additions).
+
+	basepointNafTable := basepointNafTable()
+	var aTable nafLookupTable5
+	aTable.FromP3(A)
+	// Because the basepoint is fixed, we can use a wider NAF
+	// corresponding to a bigger table.
+	aNaf := a.nonAdjacentForm(5)
+	bNaf := b.nonAdjacentForm(8)
+
+	// Find the first nonzero coefficient so the main loop can start there
+	// instead of uselessly doubling a zero accumulator from bit 255 down.
+	i := 255
+	for j := i; j >= 0; j-- {
+		if aNaf[j] != 0 || bNaf[j] != 0 {
+			// Capture the index: without this assignment the scan is dead
+			// code and the loop below always runs all 256 iterations.
+			i = j
+			break
+		}
+	}
+
+	// Reusable scratch values; every Add/Sub/Double below writes in place.
+	multA := &projCached{}
+	multB := &affineCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	tmp2.Zero()
+
+	// Move from high to low bits, doubling the accumulator
+	// at each iteration and checking whether there is a nonzero
+	// coefficient to look up a multiple of.
+	for ; i >= 0; i-- {
+		tmp1.Double(tmp2)
+
+		// Only update v if we have a nonzero coeff to add in.
+		if aNaf[i] > 0 {
+			v.fromP1xP1(tmp1)
+			aTable.SelectInto(multA, aNaf[i])
+			tmp1.Add(v, multA)
+		} else if aNaf[i] < 0 {
+			v.fromP1xP1(tmp1)
+			aTable.SelectInto(multA, -aNaf[i])
+			tmp1.Sub(v, multA)
+		}
+
+		if bNaf[i] > 0 {
+			v.fromP1xP1(tmp1)
+			basepointNafTable.SelectInto(multB, bNaf[i])
+			tmp1.AddAffine(v, multB)
+		} else if bNaf[i] < 0 {
+			v.fromP1xP1(tmp1)
+			basepointNafTable.SelectInto(multB, -bNaf[i])
+			tmp1.SubAffine(v, multB)
+		}
+
+		tmp2.FromP1xP1(tmp1)
+	}
+
+	v.fromP2(tmp2)
+	return v
+}
diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go
new file mode 100644
index 00000000..83234bbc
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 3651cfa9..235496ee 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -3,13 +3,13 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
-Documentation: https://godocs.io/github.com/BurntSushi/toml
+Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
-This library requires Go 1.13 or newer; add it to your go.mod with:
+This library requires Go 1.18 or newer; add it to your go.mod with:
% go get github.com/BurntSushi/toml@latest
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 0ca1dc4f..3fa516ca 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -6,7 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math"
"os"
"reflect"
@@ -18,13 +18,13 @@ import (
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
+ UnmarshalTOML(any) error
}
// Unmarshal decodes the contents of data in TOML format into a pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Unmarshal(data []byte, v interface{}) error {
+func Unmarshal(data []byte, v any) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
@@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
// Decode the TOML data in to the pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Decode(data string, v interface{}) (MetaData, error) {
+func Decode(data string, v any) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile reads the contents of a file and decodes it with [Decode].
-func DecodeFile(path string, v interface{}) (MetaData, error) {
+func DecodeFile(path string, v any) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
@@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
return NewDecoder(fp).Decode(v)
}
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
+
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
// overhead of reflection. They can be useful when you don't know the exact type
// of TOML data until runtime.
type Primitive struct {
- undecoded interface{}
+ undecoded any
context Key
}
@@ -91,7 +102,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
-// ### Key mapping
+// # Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -122,7 +133,7 @@ var (
)
// Decode TOML data in to the pointer `v`.
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+func (dec *Decoder) Decode(v any) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
s := "%q"
@@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
- // Check if this is a supported type: struct, map, interface{}, or something
- // that implements UnmarshalTOML or UnmarshalText.
+ // Check if this is a supported type: struct, map, any, or something that
+ // implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
@@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
- data, err := ioutil.ReadAll(dec.r)
+ data, err := io.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
@@ -179,18 +190,31 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// will only reflect keys that were decoded. Namely, any keys hidden behind a
// Primitive will be considered undecoded. Executing this method will update the
// undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
+// markDecodedRecursive is a helper to mark any key under the given tmap as
+// decoded, recursing as needed
+func markDecodedRecursive(md *MetaData, tmap map[string]any) {
+ for key := range tmap {
+ md.decoded[md.context.add(key).String()] = struct{}{}
+ if tmap, ok := tmap[key].(map[string]any); ok {
+ md.context = append(md.context, key)
+ markDecodedRecursive(md, tmap)
+ md.context = md.context[0 : len(md.context)-1]
+ }
+ }
+}
+
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unify(data any, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == primitiveType {
@@ -207,7 +231,21 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
+ err := v.UnmarshalTOML(data)
+ if err != nil {
+ return md.parseErr(err)
+ }
+ // Assume the Unmarshaler decoded everything, so mark all keys under
+ // this table as decoded.
+ if tmap, ok := data.(map[string]any); ok {
+ markDecodedRecursive(md, tmap)
+ }
+ if aot, ok := data.([]map[string]any); ok {
+ for _, tmap := range aot {
+ markDecodedRecursive(md, tmap)
+ }
+ }
+ return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
@@ -227,14 +265,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.unifyInt(data, rv)
}
switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
@@ -248,7 +278,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
- if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
+ if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
@@ -258,14 +288,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.e("unsupported type %s", rv.Kind())
}
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]any)
if !ok {
if mapping == nil {
return nil
}
- return md.e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
+ return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
}
for key, datum := range tmap {
@@ -304,14 +333,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
}
- tmap, ok := mapping.(map[string]interface{})
+ tmap, ok := mapping.(map[string]any)
if !ok {
if tmap == nil {
return nil
@@ -347,7 +376,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -361,7 +390,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
return md.unifySliceArray(datav, rv)
}
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -388,7 +417,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyString(data any, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
@@ -408,7 +437,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
return md.badtype("string", data)
}
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
rvk := rv.Kind()
if num, ok := data.(float64); ok {
@@ -429,7 +458,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
- return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
@@ -438,7 +467,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
return md.badtype("float", data)
}
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
@@ -481,7 +510,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
@@ -489,12 +518,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
return md.badtype("boolean", data)
}
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
-func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case Marshaler:
@@ -523,27 +552,29 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
+ return md.parseErr(err)
}
return nil
}
-func (md *MetaData) badtype(dst string, data interface{}) error {
- return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
+func (md *MetaData) badtype(dst string, data any) error {
+ return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
}
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
+ d := string(md.data)
return ParseError{
- LastKey: k,
- Position: md.keyInfo[k].pos,
- Line: md.keyInfo[k].pos.Line,
+ Message: err.Error(),
err: err,
- input: string(md.data),
+ LastKey: k,
+ Position: md.keyInfo[k].pos.withCol(d),
+ Line: md.keyInfo[k].pos.Line,
+ input: d,
}
}
-func (md *MetaData) e(format string, args ...interface{}) error {
+func (md *MetaData) e(format string, args ...any) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
@@ -556,7 +587,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
+func rvalue(v any) reflect.Value {
return indirect(reflect.ValueOf(v))
}
@@ -600,3 +631,8 @@ func isUnifiable(rv reflect.Value) bool {
}
return false
}
+
+// fmt %T with "interface {}" replaced with "any", which is far more readable.
+func fmtType(t any) string {
+ return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
deleted file mode 100644
index 086d0b68..00000000
--- a/vendor/github.com/BurntSushi/toml/decode_go116.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package toml
-
-import (
- "io/fs"
-)
-
-// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
-// [Decode].
-func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
- fp, err := fsys.Open(path)
- if err != nil {
- return MetaData{}, err
- }
- defer fp.Close()
- return NewDecoder(fp).Decode(v)
-}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
index c6af3f23..155709a8 100644
--- a/vendor/github.com/BurntSushi/toml/deprecated.go
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -5,17 +5,25 @@ import (
"io"
)
+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
+
+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
// Deprecated: use MetaData.PrimitiveDecode.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
+func PrimitiveDecode(primValue Primitive, v any) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
-
-// Deprecated: use NewDecoder(reader).Decode(&value).
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index 81a7c0fe..82c90a90 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -2,9 +2,6 @@
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
-// There is also support for delaying decoding with the Primitive type, and
-// querying the set of keys in a TOML document with the MetaData type.
-//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify if TOML document is valid. It can also be used to
// print the type of each key.
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 930e1d52..ac196e7d 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,6 +2,7 @@ package toml
import (
"bufio"
+ "bytes"
"encoding"
"encoding/json"
"errors"
@@ -76,6 +77,17 @@ type Marshaler interface {
MarshalTOML() ([]byte, error)
}
+// Marshal returns a TOML representation of the Go value.
+//
+// See [Encoder] for a description of the encoding process.
+func Marshal(v any) ([]byte, error) {
+ buff := new(bytes.Buffer)
+ if err := NewEncoder(buff).Encode(v); err != nil {
+ return nil, err
+ }
+ return buff.Bytes(), nil
+}
+
// Encoder encodes a Go to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
@@ -115,28 +127,24 @@ type Marshaler interface {
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
- // String to use for a single indentation level; default is two spaces.
- Indent string
-
+ Indent string // string for a single indentation level; default is two spaces.
+ hasWritten bool // written any output to w yet?
w *bufio.Writer
- hasWritten bool // written any output to w yet?
}
// NewEncoder create a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
+ return &Encoder{w: bufio.NewWriter(w), Indent: " "}
}
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
//
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
-func (enc *Encoder) Encode(v interface{}) error {
+func (enc *Encoder) Encode(v any) error {
rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ err := enc.safeEncode(Key([]string{}), rv)
+ if err != nil {
return err
}
return enc.w.Flush()
@@ -279,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
@@ -303,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Interface:
enc.eElement(rv.Elem())
default:
- encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
+ encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
}
}
@@ -382,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
+ var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
- mapKeysSub = append(mapKeysSub, k)
+ mapKeysSub = append(mapKeysSub, mapKey)
} else {
- mapKeysDirect = append(mapKeysDirect, k)
+ mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
- var writeMapKeys = func(mapKeys []string, trailC bool) {
- sort.Strings(mapKeys)
+ writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
+ sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
- val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+ val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
- enc.writeKeyValue(Key{mapKey}, val, true)
+ enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
- enc.encode(key.add(mapKey), val)
+ enc.encode(key.add(mapKey.String()), val)
}
}
}
@@ -421,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
-const is32Bit = (32 << (^uint(0) >> 63)) == 32
-
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@@ -457,6 +474,15 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
+ // Need to make a copy because ... ehm, I don't know why... I guess
+ // allocating a new array can cause it to fail(?)
+ //
+ // Done for: https://github.com/BurntSushi/toml/issues/430
+ // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ start = copyStart
+
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
@@ -471,50 +497,43 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
- // Copy so it works correct on 32bit archs; not clear why this
- // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
- // This also works fine on 64bit, but 32bit archs are somewhat
- // rare and this is a wee bit faster.
- if is32Bit {
- copyStart := make([]int, len(start))
- copy(copyStart, start)
- fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
- writeFields := func(fields [][]int) {
+ writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
- fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
-
- if isNil(fieldVal) { /// Don't write anything for nil fields.
- continue
- }
+ fieldVal := rv.FieldByIndex(fieldIndex)
opts := getOptions(fieldType.Tag)
if opts.skip {
continue
}
+ if opts.omitempty && isEmpty(fieldVal) {
+ continue
+ }
+
+ fieldVal = eindirect(fieldVal)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
+ continue
+ }
+
keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && enc.isEmpty(fieldVal) {
- continue
- }
if opts.omitzero && isZero(fieldVal) {
continue
}
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
- if fieldIndex[0] != len(fields)-1 {
+ if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@@ -526,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
+
+ l := len(fieldsDirect) + len(fieldsSub)
+ writeFields(fieldsDirect, l)
+ writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}
@@ -649,7 +670,7 @@ func isZero(rv reflect.Value) bool {
return false
}
-func (enc *Encoder) isEmpty(rv reflect.Value) bool {
+func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
@@ -664,13 +685,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
// type b struct{ s []string }
// s := a{field: b{s: []string{"AAA"}}}
for i := 0; i < rv.NumField(); i++ {
- if !enc.isEmpty(rv.Field(i)) {
+ if !isEmpty(rv.Field(i)) {
return false
}
}
return true
case reflect.Bool:
return !rv.Bool()
+ case reflect.Ptr:
+ return rv.IsNil()
}
return false
}
@@ -693,8 +716,11 @@ func (enc *Encoder) newline() {
// v v v v vv
// key = {k = 1, k2 = 2}
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+ /// Marshaler used on top-level document; call eElement() to just call
+ /// Marshal{TOML,Text}.
if len(key) == 0 {
- encPanic(errNoKey)
+ enc.eElement(val)
+ return
}
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
@@ -703,7 +729,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
}
}
-func (enc *Encoder) wf(format string, v ...interface{}) {
+func (enc *Encoder) wf(format string, v ...any) {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index f4f390e6..b7077d3a 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -67,24 +67,39 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
+ Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
- Len int // Lenght in bytes.
+ Len int // Length of the error in bytes.
+}
+
+func (p Position) withCol(tomlFile string) Position {
+ var (
+ pos int
+ lines = strings.Split(tomlFile, "\n")
+ )
+ for i := range lines {
+ ll := len(lines[i]) + 1 // +1 for the removed newline
+ if pos+ll >= p.Start {
+ p.Col = p.Start - pos + 1
+ if p.Col < 1 { // Should never happen, but just in case.
+ p.Col = 1
+ }
+ break
+ }
+ pos += ll
+ }
+ return p
}
func (pe ParseError) Error() string {
- msg := pe.Message
- if msg == "" { // Error from errorf()
- msg = pe.err.Error()
- }
-
if pe.LastKey == "" {
- return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+ return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
- pe.Position.Line, pe.LastKey, msg)
+ pe.Position.Line, pe.LastKey, pe.Message)
}
-// ErrorWithUsage() returns the error with detailed location context.
+// ErrorWithPosition returns the error with detailed location context.
//
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithPosition() string {
@@ -92,39 +107,41 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
- var (
- lines = strings.Split(pe.input, "\n")
- col = pe.column(lines)
- b = new(strings.Builder)
- )
-
- msg := pe.Message
- if msg == "" {
- msg = pe.err.Error()
- }
-
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
+ var (
+ lines = strings.Split(pe.input, "\n")
+ b = new(strings.Builder)
+ )
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
- msg, pe.Position.Line, col+1)
+ pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
- msg, pe.Position.Line, col, col+pe.Position.Len)
+ pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
}
if pe.Position.Line > 1 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
}
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
- fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+
+ /// Expand tabs, so that the ^^^s are at the correct position, but leave
+ /// "column 10-13" intact. Adjusting this to the visual column would be
+ /// better, but we don't know the tabsize of the user in their editor, which
+ /// can be 8, 4, 2, or something else. We can't know. So leaving it as the
+ /// character index is probably the "most correct".
+ expanded := expandTab(lines[pe.Position.Line-1])
+ diff := len(expanded) - len(lines[pe.Position.Line-1])
+
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
-// ErrorWithUsage() returns the error with detailed location context and usage
+// ErrorWithUsage returns the error with detailed location context and usage
// guidance.
//
// See the documentation on [ParseError].
@@ -142,34 +159,47 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
-func (pe ParseError) column(lines []string) int {
- var pos, col int
- for i := range lines {
- ll := len(lines[i]) + 1 // +1 for the removed newline
- if pos+ll >= pe.Position.Start {
- col = pe.Position.Start - pos
- if col < 0 { // Should never happen, but just in case.
- col = 0
+func expandTab(s string) string {
+ var (
+ b strings.Builder
+ l int
+ fill = func(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = ' '
}
- break
+ return string(b)
+ }
+ )
+ b.Grow(len(s))
+ for _, r := range s {
+ switch r {
+ case '\t':
+ tw := 8 - l%8
+ b.WriteString(fill(tw))
+ l += tw
+ default:
+ b.WriteRune(r)
+ l += 1
}
- pos += ll
}
-
- return col
+ return b.String()
}
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
- errLexInvalidNum struct{ v string }
- errLexInvalidDate struct{ v string }
+ errParseDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
errParseRange struct {
- i interface{} // int or float
- size string // "int64", "uint16", etc.
+ i any // int or float
+ size string // "int64", "uint16", etc.
+ }
+ errUnsafeFloat struct {
+ i interface{} // float32 or float64
+ size string // "float32" or "float64"
}
errParseDuration struct{ d string }
)
@@ -183,18 +213,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
-func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
-func (e errLexInvalidNum) Usage() string { return "" }
-func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
-func (e errLexInvalidDate) Usage() string { return "" }
+func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
+func (e errParseDate) Usage() string { return usageDate }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string { return usageIntOverflow }
-func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
-func (e errParseDuration) Usage() string { return usageDuration }
+func (e errUnsafeFloat) Error() string {
+ return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
+}
+func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
@@ -251,19 +283,35 @@ bug in the program that uses too small of an integer.
The maximum and minimum values are:
size │ lowest │ highest
- ───────┼────────────────┼──────────
+ ───────┼────────────────┼──────────────
int8 │ -128 │ 127
int16 │ -32,768 │ 32,767
int32 │ -2,147,483,648 │ 2,147,483,647
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
uint8 │ 0 │ 255
- uint16 │ 0 │ 65535
- uint32 │ 0 │ 4294967295
+ uint16 │ 0 │ 65,535
+ uint32 │ 0 │ 4,294,967,295
uint64 │ 0 │ 1.8 × 10¹⁸
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`
+const usageUnsafeFloat = `
+This number is outside of the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the below range can not always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+ f = 2_000_000_000.0
+
+Accuracy ranges:
+
+ float32 = 16,777,215
+ float64 = 9,007,199,254,740,991
+`
+
const usageDuration = `
A duration must be as "number", without any spaces. Valid units are:
@@ -277,3 +325,23 @@ A duration must be as "number", without any spaces. Valid units are:
You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+ 2006-01-02T15:04:05Z07:00 Date and time, with timezone.
+ 2006-01-02T15:04:05 Date and time, but without timezone.
+ 2006-01-02 Date without a time or timezone.
+ 15:04:05 Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+ 15:04:05.123
+ 15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+// 2006-01-02T15:04Z07:00
+// 2006-01-02T15:04
+// 15:04
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index d4d70871..1c3b4770 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -17,6 +17,7 @@ const (
itemEOF
itemText
itemString
+ itemStringEsc
itemRawString
itemMultilineString
itemRawMultilineString
@@ -46,12 +47,14 @@ func (p Position) String() string {
}
type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+ tomlNext bool
+ esc bool
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
@@ -87,13 +90,14 @@ func (lx *lexer) nextItem() item {
}
}
-func lex(input string) *lexer {
+func lex(input string, tomlNext bool) *lexer {
lx := &lexer{
- input: input,
- state: lexTop,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- line: 1,
+ input: input,
+ state: lexTop,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ line: 1,
+ tomlNext: tomlNext,
}
return lx
}
@@ -162,7 +166,7 @@ func (lx *lexer) next() (r rune) {
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- if r == utf8.RuneError {
+ if r == utf8.RuneError && w == 1 {
lx.error(errLexUTF8{lx.input[lx.pos]})
return utf8.RuneError
}
@@ -268,10 +272,12 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
}
// errorf is like error, and creates a new error.
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
- pos.Line--
+ if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
+ pos.Line--
+ }
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@@ -331,9 +337,7 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf(
- "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
- r)
+ return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -408,7 +412,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
- if isBareKeyChar(r) {
+ if isBareKeyChar(r, lx.tomlNext) {
return lexBareName
}
lx.backup()
@@ -490,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
+ }
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@@ -558,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
+ if r == '\n' {
+ return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
+ }
return lx.errorf("expected value but found %q instead", r)
}
@@ -618,6 +628,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValue)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValue)
@@ -640,6 +653,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValueEnd)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValueEnd)
@@ -648,6 +664,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
+ if lx.tomlNext {
+ return lexInlineTableValueEnd
+ }
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue
@@ -687,7 +706,12 @@ func lexString(lx *lexer) stateFn {
return lexStringEscape
case r == '"':
lx.backup()
- lx.emit(itemString)
+ if lx.esc {
+ lx.esc = false
+ lx.emit(itemStringEsc)
+ } else {
+ lx.emit(itemString)
+ }
lx.next()
lx.ignore()
return lx.pop()
@@ -737,6 +761,7 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
+ lx.esc = false
lx.emit(itemMultilineString)
lx.next() /// Read over ''' again and discard it.
lx.next()
@@ -770,8 +795,8 @@ func lexRawString(lx *lexer) stateFn {
}
}
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning ''' has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
@@ -826,8 +851,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
}
func lexStringEscape(lx *lexer) stateFn {
+ lx.esc = true
r := lx.next()
switch r {
+ case 'e':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ fallthrough
case 'b':
fallthrough
case 't':
@@ -846,6 +877,11 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '\\':
return lx.pop()
+ case 'x':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ return lexHexEscape
case 'u':
return lexShortUnicodeEscape
case 'U':
@@ -854,14 +890,23 @@ func lexStringEscape(lx *lexer) stateFn {
return lx.error(errLexEscape{r})
}
+func lexHexEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 2; i++ {
+ r = lx.next()
+ if !isHex(r) {
+ return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
+ }
+ }
+ return lx.pop()
+}
+
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected four hexadecimal digits after '\u', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -871,10 +916,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected eight hexadecimal digits after '\U', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -941,7 +984,7 @@ func lexDatetime(lx *lexer) stateFn {
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if isHexadecimal(r) {
+ if isHex(r) {
return lexHexInteger
}
switch r {
@@ -1075,8 +1118,8 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
return lexOctalInteger
case 'x':
r = lx.peek()
- if !isHexadecimal(r) {
- lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ if !isHex(r) {
+ lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@@ -1173,7 +1216,7 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
@@ -1206,7 +1249,7 @@ func (itype itemType) String() string {
}
func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+ return fmt.Sprintf("(%s, %s)", item.typ, item.val)
}
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
@@ -1222,12 +1265,8 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
-}
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' || r == '-'
+func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
+func isBareKeyChar(r rune, tomlNext bool) bool {
+ return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') || r == '_' || r == '-'
}
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
index 71847a04..0d337026 100644
--- a/vendor/github.com/BurntSushi/toml/meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -13,7 +13,7 @@ type MetaData struct {
context Key // Used only during decoding.
keyInfo map[string]keyInfo
- mapping map[string]interface{}
+ mapping map[string]any
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
@@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
}
var (
- hash map[string]interface{}
+ hash map[string]any
ok bool
- hashOrVal interface{} = md.mapping
+ hashOrVal any = md.mapping
)
for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ if hash, ok = hashOrVal.(map[string]any); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
@@ -94,28 +94,52 @@ func (md *MetaData) Undecoded() []Key {
type Key []string
func (k Key) String() string {
- ss := make([]string, len(k))
- for i := range k {
- ss[i] = k.maybeQuoted(i)
+ // This is called quite often, so it's a bit funky to make it faster.
+ var b strings.Builder
+ b.Grow(len(k) * 25)
+outer:
+ for i, kk := range k {
+ if i > 0 {
+ b.WriteByte('.')
+ }
+ if kk == "" {
+ b.WriteString(`""`)
+ } else {
+ for _, r := range kk {
+ // "Inline" isBareKeyChar
+ if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
+ b.WriteByte('"')
+ b.WriteString(dblQuotedReplacer.Replace(kk))
+ b.WriteByte('"')
+ continue outer
+ }
+ }
+ b.WriteString(kk)
+ }
}
- return strings.Join(ss, ".")
+ return b.String()
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+ for _, r := range k[i] {
+ if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
+ continue
}
+ return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
+// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
+
+func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
+func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index d2542d6f..e3ea8a9a 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "math"
+ "os"
"strconv"
"strings"
"time"
@@ -15,12 +17,13 @@ type parser struct {
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
+ tomlNext bool
ordered []Key // List of keys in the order that they appear in the TOML data.
- keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
- mapping map[string]interface{} // Map keyname → key value.
- implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
+ keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
+ mapping map[string]any // Map keyname → key value.
+ implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
type keyInfo struct {
@@ -29,6 +32,8 @@ type keyInfo struct {
}
func parse(data string) (p *parser, err error) {
+ _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
+
defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
@@ -41,9 +46,12 @@ func parse(data string) (p *parser, err error) {
}()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
- // which mangles stuff.
- if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+ // it anyway.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
+ } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+ data = data[3:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
@@ -56,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
- Position: Position{Line: 1, Start: i, Len: 1},
+ Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@@ -64,10 +72,11 @@ func parse(data string) (p *parser, err error) {
p = &parser{
keyInfo: make(map[string]keyInfo),
- mapping: make(map[string]interface{}),
- lx: lex(data),
+ mapping: make(map[string]any),
+ lx: lex(data, tomlNext),
ordered: make([]Key, 0),
implicits: make(map[string]struct{}),
+ tomlNext: tomlNext,
}
for {
item := p.next()
@@ -82,26 +91,27 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
+ Message: err.Error(),
err: err,
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
}
-func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: it.pos,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
}
-func (p *parser) panicf(format string, v ...interface{}) {
+func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
- Position: p.pos,
+ Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@@ -113,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
- Position: it.pos,
+ Message: it.err.Error(),
+ err: it.err,
+ Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
- err: it.err,
})
}
@@ -131,7 +142,7 @@ func (p *parser) nextPos() item {
return it
}
-func (p *parser) bug(format string, v ...interface{}) {
+func (p *parser) bug(format string, v ...any) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -186,20 +197,21 @@ func (p *parser) topLevel(item item) {
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set value.
vItem := p.next()
val, typ := p.value(vItem, false)
- p.set(p.currentKey, val, typ, vItem.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, vItem.pos)
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
@@ -214,7 +226,7 @@ func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
- case itemString, itemMultilineString,
+ case itemString, itemStringEsc, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it, false)
return s.(string)
@@ -231,12 +243,14 @@ var datetimeRepl = strings.NewReplacer(
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
switch it.typ {
case itemString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemStringEsc:
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@@ -266,7 +280,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
panic("unreachable")
}
-func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+func (p *parser) valueInteger(it item) (any, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
@@ -290,7 +304,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
return num, p.typeOfPrimitive(it)
}
-func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+func (p *parser) valueFloat(it item) (any, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
@@ -314,7 +328,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
- if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ signbit := false
+ if val == "+nan" || val == "-nan" {
+ signbit = val == "-nan"
val = "nan"
}
num, err := strconv.ParseFloat(val, 64)
@@ -325,20 +341,29 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
+ if signbit {
+ num = math.Copysign(num, -1)
+ }
return num, p.typeOfPrimitive(it)
}
var dtTypes = []struct {
fmt string
zone *time.Location
+ next bool
}{
- {time.RFC3339Nano, time.Local},
- {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
- {"2006-01-02", internal.LocalDate},
- {"15:04:05.999999999", internal.LocalTime},
+ {time.RFC3339Nano, time.Local, false},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
+ {"2006-01-02", internal.LocalDate, false},
+ {"15:04:05.999999999", internal.LocalTime, false},
+
+ // tomlNext
+ {"2006-01-02T15:04Z07:00", time.Local, true},
+ {"2006-01-02T15:04", internal.LocalDatetime, true},
+ {"15:04", internal.LocalTime, true},
}
-func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+func (p *parser) valueDatetime(it item) (any, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
@@ -346,28 +371,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
err error
)
for _, dt := range dtTypes {
+ if dt.next && !p.tomlNext {
+ continue
+ }
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
+ if missingLeadingZero(it.val, dt.fmt) {
+ p.panicErr(it, errParseDate{it.val})
+ }
ok = true
break
}
}
if !ok {
- p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
+ p.panicErr(it, errParseDate{it.val})
}
return t, p.typeOfPrimitive(it)
}
-func (p *parser) valueArray(it item) (interface{}, tomlType) {
+// Go's time.Parse() will accept numbers without a leading zero; there isn't any
+// way to require it. https://github.com/golang/go/issues/29911
+//
+// Depend on the fact that the separators (- and :) should always be at the same
+// location.
+func missingLeadingZero(d, l string) bool {
+ for i, c := range []byte(l) {
+ if c == '.' || c == 'Z' {
+ return false
+ }
+ if (c < '0' || c > '9') && d[i] != c {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *parser) valueArray(it item) (any, tomlType) {
p.setType(p.currentKey, tomlArray, it.pos)
var (
- types []tomlType
-
- // Initialize to a non-nil empty slice. This makes it consistent with
- // how S = [] decodes into a non-nil slice inside something like struct
- // { S []string }. See #338
- array = []interface{}{}
+ // Initialize to a non-nil slice to make it consistent with how S = []
+ // decodes into a non-nil slice inside something like struct { S
+ // []string }. See #338
+ array = make([]any, 0, 2)
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
@@ -377,20 +423,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
val, typ := p.value(it, true)
array = append(array, val)
- types = append(types, typ)
- // XXX: types isn't used here, we need it to record the accurate type
+ // XXX: type isn't used here, we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
+ _ = typ
}
return array, tomlArray
}
-func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
var (
- hash = make(map[string]interface{})
+ topHash = make(map[string]any)
outerContext = p.context
outerKey = p.currentKey
)
@@ -418,19 +464,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set the value.
val, typ := p.value(p.next(), false)
- p.set(p.currentKey, val, typ, it.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, it.pos)
+
+ hash := topHash
+ for _, c := range context {
+ h, ok := hash[c]
+ if !ok {
+ h = make(map[string]any)
+ hash[c] = h
+ }
+ hash, ok = h.(map[string]any)
+ if !ok {
+ p.panicf("%q is not a table", p.context)
+ }
+ }
hash[p.currentKey] = val
/// Restore context.
@@ -438,7 +498,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
}
p.context = outerContext
p.currentKey = outerKey
- return hash, tomlHash
+ return topHash, tomlHash
}
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
@@ -468,9 +528,9 @@ func numUnderscoresOK(s string) bool {
}
}
- // isHexadecimal is a superset of all the permissable characters
- // surrounding an underscore.
- accept = isHexadecimal(r)
+ // isHex is a superset of all the permissible characters surrounding an
+ // underscore.
+ accept = isHex(r)
}
return accept
}
@@ -493,21 +553,19 @@ func numPeriodsOK(s string) bool {
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) addContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
+ /// Always start at the top level and drill down for our context.
hashContext := p.mapping
- keyContext := make(Key, 0)
+ keyContext := make(Key, 0, len(key)-1)
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
+ /// We only need implicit hashes for the parents.
+ for _, k := range key.parent() {
+ _, ok := hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
+ hashContext[k] = make(map[string]any)
}
// If the hash context is actually an array of tables, then set
@@ -516,9 +574,9 @@ func (p *parser) addContext(key Key, array bool) {
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
- case []map[string]interface{}:
+ case []map[string]any:
hashContext = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
@@ -529,40 +587,33 @@ func (p *parser) addContext(key Key, array bool) {
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
- k := key[len(key)-1]
+ k := key.last()
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 4)
+ hashContext[k] = make([]map[string]any, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
+ if hash, ok := hashContext[k].([]map[string]any); ok {
+ hashContext[k] = append(hash, make(map[string]any))
} else {
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
+ p.setValue(key.last(), make(map[string]any))
}
- p.context = append(p.context, key[len(key)-1])
-}
-
-// set calls setValue and setType.
-func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
- p.setValue(key, val)
- p.setType(key, typ, pos)
-
+ p.context = append(p.context, key.last())
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
+func (p *parser) setValue(key string, value any) {
var (
- tmpHash interface{}
+ tmpHash any
ok bool
hash = p.mapping
- keyContext Key
+ keyContext = make(Key, 0, len(p.context)+1)
)
for _, k := range p.context {
keyContext = append(keyContext, k)
@@ -570,11 +621,11 @@ func (p *parser) setValue(key string, value interface{}) {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
- case []map[string]interface{}:
+ case []map[string]any:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hash = t
default:
p.panicf("Key '%s' has already been defined.", keyContext)
@@ -601,9 +652,8 @@ func (p *parser) setValue(key string, value interface{}) {
p.removeImplicit(keyContext)
return
}
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
+ // Otherwise, we have a concrete key trying to override a previous key,
+ // which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
@@ -632,14 +682,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
-func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
-func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
-func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
-func (p *parser) addImplicitContext(key Key) {
- p.addImplicit(key)
- p.addContext(key, false)
-}
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
// current returns the full key name of the current context.
func (p *parser) current() string {
@@ -662,114 +709,131 @@ func stripFirstNewline(s string) string {
return s
}
-// Remove newlines inside triple-quoted strings if a line ends with "\".
+// stripEscapedNewlines removes whitespace after line-ending backslashes in
+// multiline strings.
+//
+// A line-ending backslash is an unescaped \ followed only by whitespace until
+// the next newline. After a line-ending backslash, all whitespace is removed
+// until the next non-whitespace character.
func (p *parser) stripEscapedNewlines(s string) string {
- split := strings.Split(s, "\n")
- if len(split) < 1 {
- return s
- }
+ var (
+ b strings.Builder
+ i int
+ )
+ b.Grow(len(s))
+ for {
+ ix := strings.Index(s[i:], `\`)
+ if ix < 0 {
+ b.WriteString(s)
+ return b.String()
+ }
+ i += ix
- escNL := false // Keep track of the last non-blank line was escaped.
- for i, line := range split {
- line = strings.TrimRight(line, " \t\r")
-
- if len(line) == 0 || line[len(line)-1] != '\\' {
- split[i] = strings.TrimRight(split[i], "\r")
- if !escNL && i != len(split)-1 {
- split[i] += "\n"
+ if len(s) > i+1 && s[i+1] == '\\' {
+ // Escaped backslash.
+ i += 2
+ continue
+ }
+ // Scan until the next non-whitespace.
+ j := i + 1
+ whitespaceLoop:
+ for ; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\r', '\n':
+ default:
+ break whitespaceLoop
}
+ }
+ if j == i+1 {
+ // Not a whitespace escape.
+ i++
continue
}
-
- escBS := true
- for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
- escBS = !escBS
- }
- if escNL {
- line = strings.TrimLeft(line, " \t\r")
- }
- escNL = !escBS
-
- if escBS {
- split[i] += "\n"
+ if !strings.Contains(s[i:j], "\n") {
+ // This is not a line-ending backslash. (It's a bad escape sequence,
+ // but we can let replaceEscapes catch it.)
+ i++
continue
}
-
- if i == len(split)-1 {
- p.panicf("invalid escape: '\\ '")
- }
-
- split[i] = line[:len(line)-1] // Remove \
- if len(split)-1 > i {
- split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
- }
+ b.WriteString(s[:i])
+ s = s[j:]
+ i = 0
}
- return strings.Join(split, "")
}
func (p *parser) replaceEscapes(it item, str string) string {
- replaced := make([]rune, 0, len(str))
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
+ var (
+ b strings.Builder
+ skip = 0
+ )
+ b.Grow(len(str))
+ for i, c := range str {
+ if skip > 0 {
+ skip--
continue
}
- r += 1
- if r >= len(s) {
+ if c != '\\' {
+ b.WriteRune(c)
+ continue
+ }
+
+ if i >= len(str) {
p.bug("Escape sequence at end of string.")
return ""
}
- switch s[r] {
+ switch str[i+1] {
default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
case ' ', '\t':
- p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+ p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
+ b.WriteByte(0x08)
+ skip = 1
case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
+ b.WriteByte(0x09)
+ skip = 1
case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
+ b.WriteByte(0x0a)
+ skip = 1
case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
+ b.WriteByte(0x0c)
+ skip = 1
case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
+ b.WriteByte(0x0d)
+ skip = 1
+ case 'e':
+ if p.tomlNext {
+ b.WriteByte(0x1b)
+ skip = 1
+ }
case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
+ b.WriteByte(0x22)
+ skip = 1
case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
+ b.WriteByte(0x5c)
+ skip = 1
+ // The lexer guarantees the correct number of characters are present;
+ // don't need to check here.
+ case 'x':
+ if p.tomlNext {
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
+ b.WriteRune(escaped)
+ skip = 3
+ }
case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
- replaced = append(replaced, escaped)
- r += 5
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
+ b.WriteRune(escaped)
+ skip = 5
case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
- replaced = append(replaced, escaped)
- r += 9
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
+ b.WriteRune(escaped)
+ skip = 9
}
}
- return string(replaced)
+ return b.String()
}
-func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
- s := string(bs)
+func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 254ca82e..10c51f7e 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -25,10 +25,8 @@ type field struct {
// breaking ties with index sequence.
type byName []field
-func (x byName) Len() int { return len(x) }
-
+func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
@@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
// byIndex sorts field by index sequence.
type byIndex []field
-func (x byIndex) Len() int { return len(x) }
-
+func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go
index 4e90d773..1c090d33 100644
--- a/vendor/github.com/BurntSushi/toml/type_toml.go
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
type tomlBaseType string
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
+func (btype tomlBaseType) typeString() string { return string(btype) }
+func (btype tomlBaseType) String() string { return btype.typeString() }
var (
tomlInteger tomlBaseType = "Integer"
@@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
return tomlFloat
case itemDatetime:
return tomlDatetime
- case itemString:
+ case itemString, itemStringEsc:
return tomlString
case itemMultilineString:
return tomlString
diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore
new file mode 100644
index 00000000..8d69a941
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.gitignore
@@ -0,0 +1,15 @@
+bin/
+.idea/
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 00000000..bb83c667
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+dist: xenial
+go:
+ - '1.10'
+ - '1.11'
+ - '1.12'
+ - '1.13'
+ - 'tip'
+
+script:
+ - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..4b462b0d
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Contributor Code of Conduct
+
+This project adheres to [The Code Manifesto](http://codemanifesto.com)
+as its guidelines for contributor interactions.
+
+## The Code Manifesto
+
+We want to work in an ecosystem that empowers developers to reach their
+potential — one that encourages growth and effective collaboration. A space
+that is safe for all.
+
+A space such as this benefits everyone that participates in it. It encourages
+new developers to enter our field. It is through discussion and collaboration
+that we grow, and through growth that we improve.
+
+In the effort to create such a place, we hold to these values:
+
+1. **Discrimination limits us.** This includes discrimination on the basis of
+ race, gender, sexual orientation, gender identity, age, nationality,
+ technology and any other arbitrary exclusion of a group of people.
+2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
+ levels. Remember that, and if brought to your attention, heed it.
+3. **We are our biggest assets.** None of us were born masters of our trade.
+ Each of us has been helped along the way. Return that favor, when and where
+ you can.
+4. **We are resources for the future.** As an extension of #3, share what you
+ know. Make yourself a resource to help those that come after you.
+5. **Respect defines us.** Treat others as you wish to be treated. Make your
+ discussions, criticisms and debates from a position of respectfulness. Ask
+ yourself, is it true? Is it necessary? Is it constructive? Anything less is
+ unacceptable.
+6. **Reactions require grace.** Angry responses are valid, but abusive language
+ and vindictive actions are toxic. When something happens that offends you,
+ handle it assertively, but be respectful. Escalate reasonably, and try to
+ allow the offender an opportunity to explain themselves, and possibly
+ correct the issue.
+7. **Opinions are just that: opinions.** Each and every one of us, due to our
+ background and upbringing, have varying opinions. That is perfectly
+ acceptable. Remember this: if you respect your own opinions, you should
+ respect the opinions of others.
+8. **To err is human.** You might not intend it, but mistakes do happen and
+ contribute to build experience. Tolerate honest mistakes, and don't
+ hesitate to apologize if you make one yourself.
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 00000000..7ed268a1
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that need to be done
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create actual list of contributors and projects that currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, names of variables/constants clearly describes what they are doing
+- Public functions must be documented and described in source file and added to README.md to the list of available functions
+- There must be unit-tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 00000000..cacba910
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2020 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 00000000..2c3fc35e
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,622 @@
+govalidator
+===========
+[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator)
+[](https://travis-ci.org/asaskevich/govalidator)
+[](https://codecov.io/gh/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get specified release of the package with `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add following line in your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you are unhappy to use long `govalidator`, you can do something like this:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
+ // ...
+}
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
+ // ...
+})
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func ErrorsByField(e error) map[string]string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func HasLowerCase(str string) bool
+func HasUpperCase(str string) bool
+func HasWhitespace(str string) bool
+func HasWhitespaceOnly(str string) bool
+func InRange(value interface{}, left interface{}, right interface{}) bool
+func InRangeFloat32(value, left, right float32) bool
+func InRangeFloat64(value, left, right float64) bool
+func InRangeInt(value, left, right interface{}) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCIDR(str string) bool
+func IsCRC32(str string) bool
+func IsCRC32b(str string) bool
+func IsCreditCard(str string) bool
+func IsDNSName(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsExistingEmail(email string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHash(str string, algorithm string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsHost(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsISO4217(str string) bool
+func IsISO693Alpha2(str string) bool
+func IsISO693Alpha3b(str string) bool
+func IsIn(str string, params ...string) bool
+func IsInRaw(str string, params ...string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMD4(str string) bool
+func IsMD5(str string) bool
+func IsMagnetURI(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNotNull(str string) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRFC3339(str string) bool
+func IsRFC3339WithoutZone(str string) bool
+func IsRGBcolor(str string) bool
+func IsRegex(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsRipeMD128(str string) bool
+func IsRipeMD160(str string) bool
+func IsRsaPub(str string, params ...string) bool
+func IsRsaPublicKey(str string, keylen int) bool
+func IsSHA1(str string) bool
+func IsSHA256(str string) bool
+func IsSHA384(str string) bool
+func IsSHA512(str string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsTiger128(str string) bool
+func IsTiger160(str string) bool
+func IsTiger192(str string) bool
+func IsTime(str string, format string) bool
+func IsType(v interface{}, params ...string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsULID(str string) bool
+func IsUnixTime(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func MaxStringLength(str string, params ...string) bool
+func MinStringLength(str string, params ...string) bool
+func NormalizeEmail(str string) (string, error)
+func PadBoth(str string, padStr string, padLen int) string
+func PadLeft(str string, padStr string, padLen int) string
+func PadRight(str string, padStr string, padLen int) string
+func PrependPathToErrors(err error, path string) error
+func Range(str string, params ...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func SetNilPtrAllowedByRequired(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (res int64, err error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func TruncatingErrorf(str string, args ...interface{}) error
+func UnderscoreToCamelCase(s string) string
+func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type ISO693Entry
+type InterfaceParamValidator
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### IsType
+```go
+println(govalidator.IsType("Bob", "string"))
+println(govalidator.IsType(1, "int"))
+i := 1
+println(govalidator.IsType(&i, "*int"))
+```
+
+IsType can be used through the tag `type` which is essential for map validation:
+```go
+type User struct {
+ Name string `valid:"type(string)"`
+ Age int `valid:"type(int)"`
+ Meta interface{} `valid:"type(string)"`
+}
+result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ToString
+```go
+type User struct {
+ FirstName string
+ LastName string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+ println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+ return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+ return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator - used function):
+```go
+"email": IsEmail,
+"url": IsURL,
+"dialstring": IsDialString,
+"requrl": IsRequestURL,
+"requri": IsRequestURI,
+"alpha": IsAlpha,
+"utfletter": IsUTFLetter,
+"alphanum": IsAlphanumeric,
+"utfletternum": IsUTFLetterNumeric,
+"numeric": IsNumeric,
+"utfnumeric": IsUTFNumeric,
+"utfdigit": IsUTFDigit,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"rgbcolor": IsRGBcolor,
+"lowercase": IsLowerCase,
+"uppercase": IsUpperCase,
+"int": IsInt,
+"float": IsFloat,
+"null": IsNull,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"creditcard": IsCreditCard,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"multibyte": IsMultibyte,
+"ascii": IsASCII,
+"printableascii": IsPrintableASCII,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"variablewidth": IsVariableWidth,
+"base64": IsBase64,
+"datauri": IsDataURI,
+"ip": IsIP,
+"port": IsPort,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"dns": IsDNSName,
+"host": IsHost,
+"mac": IsMAC,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"ssn": IsSSN,
+"semver": IsSemver,
+"rfc3339": IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2": IsISO3166Alpha2,
+"ISO3166Alpha3": IsISO3166Alpha3,
+"ulid": IsULID,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)" : IsRsaPub,
+"minstringlength(int)": MinStringLength,
+"maxstringlength(int)": MaxStringLength,
+```
+Validators with parameters for any type
+
+```go
+"type(type)": IsType,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+ Title string `valid:"alphanum,required"`
+ Message string `valid:"duck,ascii"`
+ Message2 string `valid:"animal(dog)"`
+ AuthorIP string `valid:"ipv4"`
+ Date string `valid:"-"`
+}
+post := &Post{
+ Title: "My Example Post",
+ Message: "duck",
+ Message2: "dog",
+ AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+ return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+ species := params[0]
+ return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338)
+If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct, both maps have to be in the form `map[string]interface{}`
+
+So here is a small example of usage:
+```go
+var mapTemplate = map[string]interface{}{
+ "name":"required,alpha",
+ "family":"required,alpha",
+ "email":"required,email",
+ "cell-phone":"numeric",
+ "address":map[string]interface{}{
+ "line1":"required,alphanum",
+ "line2":"alphanum",
+ "postal-code":"numeric",
+ },
+}
+
+var inputMap = map[string]interface{}{
+ "name":"Bob",
+ "family":"Smith",
+ "email":"foo@bar.baz",
+ "address":map[string]interface{}{
+ "line1":"",
+ "line2":"",
+ "postal-code":"",
+ },
+}
+
+result, err := govalidator.ValidateMap(inputMap, mapTemplate)
+if err != nil {
+ println("error: " + err.Error())
+}
+println(result)
+```
+
+###### WhiteList
+```go
+// Remove all characters from string ignoring characters between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+ ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+ Email string `valid:"email"`
+ CustomMinLength int `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // you can type switch on the context interface being validated
+ case StructWithCustomByteArray:
+ // you can check and validate against some other field in the context,
+ // return early or not validate against the context at all – your choice
+ case SomeOtherType:
+ // ...
+ default:
+ // expecting some other type? Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+})
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+})
+```
+
+###### Loop over Error()
+By default .Error() returns all errors in a single String. To access each error you can do this:
+```go
+ if err != nil {
+ errs := err.(govalidator.Errors).Errors()
+ for _, e := range errs {
+ fmt.Println(e.Error())
+ }
+ }
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+ Id int64 `json:"id"`
+ FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, there are some features and functions that need to be done
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create actual list of contributors and projects that currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, names of variables/constants clearly describes what they are doing
+- Public functions must be documented and described in source file and added to README.md to the list of available functions
+- There must be unit-tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## License
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 00000000..3e1da7cb
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,87 @@
+package govalidator
+
+// Iterator is the function that accepts element of slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is the function that accepts element of slice/array and its index and returns any result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
+type ConditionIterator func(interface{}, int) bool
+
+// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values
+type ReduceIterator func(interface{}, interface{}) interface{}
+
+// Some validates that any item of array corresponds to ConditionIterator. Returns boolean.
+// The loop always visits every index, but once res is true the || operator
+// short-circuits, so iterator is no longer invoked for the remaining elements.
+func Some(array []interface{}, iterator ConditionIterator) bool {
+ res := false
+ for index, data := range array {
+ res = res || iterator(data, index)
+ }
+ return res
+}
+
+// Every validates that every item of array corresponds to ConditionIterator. Returns boolean.
+// Returns true for an empty array. The loop visits every index, but once res is
+// false the && operator short-circuits and iterator is no longer invoked.
+func Every(array []interface{}, iterator ConditionIterator) bool {
+ res := true
+ for index, data := range array {
+ res = res && iterator(data, index)
+ }
+ return res
+}
+
+// Reduce boils down a list of values into a single value by ReduceIterator
+// Folds left-to-right: acc = iterator(acc, element) for each element in order.
+// For an empty array, initialValue is returned unchanged.
+func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
+ for _, data := range array {
+ initialValue = iterator(initialValue, data)
+ }
+ return initialValue
+}
+
+// Each iterates over the slice and apply Iterator to every item
+// Invoked purely for the iterator's side effects; elements are visited in order.
+func Each(array []interface{}, iterator Iterator) {
+ for index, data := range array {
+ iterator(data, index)
+ }
+}
+
+// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result.
+// The result has the same length as the input; element i is iterator(array[i], i).
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+ var result = make([]interface{}, len(array))
+ for index, data := range array {
+ result[index] = iterator(data, index)
+ }
+ return result
+}
+
+// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise.
+// Stops at the first match. Note: a matching element whose value is itself nil
+// is indistinguishable from "no match" for the caller.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+ for index, data := range array {
+ if iterator(data, index) {
+ return data
+ }
+ }
+ return nil
+}
+
+// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice.
+// Keeps input order. Always returns a non-nil slice (allocated with make), even
+// when nothing matches.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+ var result = make([]interface{}, 0)
+ for index, data := range array {
+ if iterator(data, index) {
+ result = append(result, data)
+ }
+ }
+ return result
+}
+
+// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator.
+// Returns 0 for a nil or empty slice.
+func Count(array []interface{}, iterator ConditionIterator) int {
+ count := 0
+ for index, data := range array {
+ if iterator(data, index) {
+ count = count + 1
+ }
+ }
+ return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 00000000..d68e990f
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,81 @@
+package govalidator
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// ToString convert the input to a string.
+// Uses fmt.Sprintf("%v"), so any Go value is accepted (structs, maps, nil, ...)
+// and rendered in fmt's default format. Never fails.
+func ToString(obj interface{}) string {
+ res := fmt.Sprintf("%v", obj)
+ return res
+}
+
+// ToJSON convert the input to a valid JSON string
+// On json.Marshal failure the returned string is "" and err carries the cause;
+// both the (possibly empty) string and the error are always returned together.
+func ToJSON(obj interface{}) (string, error) {
+ res, err := json.Marshal(obj)
+ if err != nil {
+ res = []byte("")
+ }
+ return string(res), err
+}
+
+// ToFloat convert the input string to a float, or 0.0 if the input is not a float.
+// Any signed/unsigned integer or float input is converted directly via reflect;
+// strings are parsed with strconv.ParseFloat (64-bit). On failure res is 0 and
+// err is non-nil.
+func ToFloat(value interface{}) (res float64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = float64(val.Int())
+ case uint, uint8, uint16, uint32, uint64:
+ res = float64(val.Uint())
+ case float32, float64:
+ res = val.Float()
+ case string:
+ res, err = strconv.ParseFloat(val.String(), 64)
+ if err != nil {
+ res = 0
+ }
+ default:
+ // NOTE(review): message says "ToInt" but this is ToFloat — upstream bug;
+ // left unchanged because this is vendored third-party code.
+ err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
+// Floats are truncated toward zero via int64(...); uint values above
+// math.MaxInt64 wrap negative. Strings must first pass IsInt, then are parsed
+// with base 0 (so strconv honors "0x"/leading-zero prefixes if IsInt admits
+// them — confirm against the Int pattern).
+func ToInt(value interface{}) (res int64, err error) {
+ val := reflect.ValueOf(value)
+
+ switch value.(type) {
+ case int, int8, int16, int32, int64:
+ res = val.Int()
+ case uint, uint8, uint16, uint32, uint64:
+ res = int64(val.Uint())
+ case float32, float64:
+ res = int64(val.Float())
+ case string:
+ if IsInt(val.String()) {
+ res, err = strconv.ParseInt(val.String(), 0, 64)
+ if err != nil {
+ res = 0
+ }
+ } else {
+ // NOTE(review): %g is a float verb; with a string operand fmt emits
+ // "%!g(string=...)" — should be %s/%v. Upstream bug, left as-is in
+ // vendored code.
+ err = fmt.Errorf("ToInt: invalid numeric format %g", value)
+ res = 0
+ }
+ default:
+ err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ res = 0
+ }
+
+ return
+}
+
+// ToBoolean convert the input string to a boolean.
+// Thin wrapper over strconv.ParseBool: accepts 1/t/T/TRUE/true/True and
+// 0/f/F/FALSE/false/False; any other input returns an error.
+func ToBoolean(str string) (bool, error) {
+ return strconv.ParseBool(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
new file mode 100644
index 00000000..55dce62d
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/doc.go
@@ -0,0 +1,3 @@
+package govalidator
+
+// A package of validators and sanitizers for strings, structures and collections.
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 00000000..1da2336f
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,47 @@
+package govalidator
+
+import (
+ "sort"
+ "strings"
+)
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+ return es
+}
+
+// Error joins all contained error messages with ";".
+// Messages are sorted alphabetically first, so output is deterministic
+// regardless of the order in which the errors were collected.
+func (es Errors) Error() string {
+ var errs []string
+ for _, e := range es {
+ errs = append(errs, e.Error())
+ }
+ sort.Strings(errs)
+ return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+ Name string
+ Err error
+ CustomErrorMessageExists bool
+
+ // Validator indicates the name of the validator that failed
+ Validator string
+ Path []string
+}
+
+// Error renders the error. When a custom message exists, the raw Err text is
+// returned verbatim; otherwise the output is "Name: msg", or
+// "path.to.field.Name: msg" when Path is non-empty (nested struct validation).
+func (e Error) Error() string {
+ if e.CustomErrorMessageExists {
+ return e.Err.Error()
+ }
+
+ errName := e.Name
+ if len(e.Path) > 0 {
+ // append may write into Path's backing array if it has spare capacity;
+ // harmless here since the result is only joined into a string.
+ errName = strings.Join(append(e.Path, e.Name), ".")
+ }
+
+ return errName + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 00000000..5041d9e8
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,100 @@
+package govalidator
+
+import (
+ "math"
+)
+
+// Abs returns absolute value of number
+func Abs(value float64) float64 {
+ return math.Abs(value)
+}
+
+// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
+func Sign(value float64) float64 {
+ if value > 0 {
+ return 1
+ } else if value < 0 {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+ return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+ return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+ return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+ return value <= 0
+}
+
+// InRangeInt returns true if value lies between left and right border
+// Inclusive on both ends; borders given in reverse order are swapped first.
+// NOTE(review): ToInt conversion errors are deliberately discarded, so a
+// non-numeric argument is silently treated as 0 rather than failing.
+func InRangeInt(value, left, right interface{}) bool {
+ value64, _ := ToInt(value)
+ left64, _ := ToInt(left)
+ right64, _ := ToInt(right)
+ if left64 > right64 {
+ left64, right64 = right64, left64
+ }
+ return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border
+// Inclusive on both ends; borders given in reverse order are swapped first.
+func InRangeFloat32(value, left, right float32) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border
+// Inclusive on both ends; borders given in reverse order are swapped first.
+func InRangeFloat64(value, left, right float64) bool {
+ if left > right {
+ left, right = right, left
+ }
+ return value >= left && value <= right
+}
+
+// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string.
+// All types must the same type.
+// False if value doesn't lie in range or if it incompatible or not comparable
+// NOTE(review): only a plain `int` value takes the integer path — int8/16/32/64
+// values fall through to default and return false. float32 values/borders are
+// widened to float64 via ToFloat before comparison.
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+ switch value.(type) {
+ case int:
+ intValue, _ := ToInt(value)
+ intLeft, _ := ToInt(left)
+ intRight, _ := ToInt(right)
+ return InRangeInt(intValue, intLeft, intRight)
+ case float32, float64:
+ intValue, _ := ToFloat(value)
+ intLeft, _ := ToFloat(left)
+ intRight, _ := ToFloat(right)
+ return InRangeFloat64(intValue, intLeft, intRight)
+ case string:
+ // NOTE(review): left/right are hard type-asserted — a call with a string
+ // value but non-string borders panics instead of returning false,
+ // contradicting the doc comment above. Vendored code: flagged, not fixed.
+ return value.(string) >= left.(string) && value.(string) <= right.(string)
+ default:
+ return false
+ }
+}
+
+// IsWhole returns true if value is whole number
+// Uses the IEEE 754 remainder (math.Remainder), so 0 and negative integers
+// report true; NaN and ±Inf yield a NaN remainder and report false.
+func IsWhole(value float64) bool {
+ return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is natural number (positive and whole)
+// Zero and negatives are excluded (IsPositive requires value > 0).
+func IsNatural(value float64) bool {
+ return IsWhole(value) && IsPositive(value)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 00000000..bafc3765
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,113 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+ Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
+ ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+ ISBN13 string = "^(?:[0-9]{13})$"
+ UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ Alpha string = "^[a-zA-Z]+$"
+ Alphanumeric string = "^[a-zA-Z0-9]+$"
+ Numeric string = "^[0-9]+$"
+ Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
+ Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
+ Hexadecimal string = "^[0-9a-fA-F]+$"
+ Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
+ ASCII string = "^[\x00-\x7F]+$"
+ Multibyte string = "[^\x00-\x7F]"
+ FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
+ Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ PrintableASCII string = "^[\x20-\x7E]+$"
+ DataURI string = "^data:.+\\/(.+);base64$"
+ MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
+ Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
+ IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
+ URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
+ URLUsername string = `(\S+(:\S*)?@)`
+ URLPath string = `((\/|\?|#)[^\s]*)`
+ URLPort string = `(:(\d{1,5}))`
+ URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
+ URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
+ URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+ SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+ WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixPath string = `^(/[^/\x00]*)+/?$`
+ WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
+ UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
+ Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
+ tagName string = "valid"
+ hasLowerCase string = ".*[[:lower:]]"
+ hasUpperCase string = ".*[[:upper:]]"
+ hasWhitespace string = ".*[[:space:]]"
+ hasWhitespaceOnly string = "^[[:space:]]+$"
+ IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
+ IMSI string = "^\\d{14,15}$"
+ E164 string = `^\+?[1-9]\d{1,14}$`
+)
+
+// Used by IsFilePath func
+const (
+ // Unknown is unresolved OS type
+ Unknown = iota
+ // Win is Windows type
+ Win
+ // Unix is *nix OS types
+ Unix
+)
+
+var (
+ userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
+ hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
+ userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
+ rxEmail = regexp.MustCompile(Email)
+ rxCreditCard = regexp.MustCompile(CreditCard)
+ rxISBN10 = regexp.MustCompile(ISBN10)
+ rxISBN13 = regexp.MustCompile(ISBN13)
+ rxUUID3 = regexp.MustCompile(UUID3)
+ rxUUID4 = regexp.MustCompile(UUID4)
+ rxUUID5 = regexp.MustCompile(UUID5)
+ rxUUID = regexp.MustCompile(UUID)
+ rxAlpha = regexp.MustCompile(Alpha)
+ rxAlphanumeric = regexp.MustCompile(Alphanumeric)
+ rxNumeric = regexp.MustCompile(Numeric)
+ rxInt = regexp.MustCompile(Int)
+ rxFloat = regexp.MustCompile(Float)
+ rxHexadecimal = regexp.MustCompile(Hexadecimal)
+ rxHexcolor = regexp.MustCompile(Hexcolor)
+ rxRGBcolor = regexp.MustCompile(RGBcolor)
+ rxASCII = regexp.MustCompile(ASCII)
+ rxPrintableASCII = regexp.MustCompile(PrintableASCII)
+ rxMultibyte = regexp.MustCompile(Multibyte)
+ rxFullWidth = regexp.MustCompile(FullWidth)
+ rxHalfWidth = regexp.MustCompile(HalfWidth)
+ rxBase64 = regexp.MustCompile(Base64)
+ rxDataURI = regexp.MustCompile(DataURI)
+ rxMagnetURI = regexp.MustCompile(MagnetURI)
+ rxLatitude = regexp.MustCompile(Latitude)
+ rxLongitude = regexp.MustCompile(Longitude)
+ rxDNSName = regexp.MustCompile(DNSName)
+ rxURL = regexp.MustCompile(URL)
+ rxSSN = regexp.MustCompile(SSN)
+ rxWinPath = regexp.MustCompile(WinPath)
+ rxUnixPath = regexp.MustCompile(UnixPath)
+ rxARWinPath = regexp.MustCompile(WinARPath)
+ rxARUnixPath = regexp.MustCompile(UnixARPath)
+ rxSemver = regexp.MustCompile(Semver)
+ rxHasLowerCase = regexp.MustCompile(hasLowerCase)
+ rxHasUpperCase = regexp.MustCompile(hasUpperCase)
+ rxHasWhitespace = regexp.MustCompile(hasWhitespace)
+ rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+ rxIMEI = regexp.MustCompile(IMEI)
+ rxIMSI = regexp.MustCompile(IMSI)
+ rxE164 = regexp.MustCompile(E164)
+)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 00000000..c573abb5
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,656 @@
+package govalidator
+
+import (
+ "reflect"
+ "regexp"
+ "sort"
+ "sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+
+// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value
+type InterfaceParamValidator func(in interface{}, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+// orderedKeys returns the map's tag names sorted ascending by each
+// tagOption.order field — presumably the order the options appeared in the
+// struct tag; the order values are assigned at parse time, not visible here
+// (confirm at the tag-parsing site).
+func (t tagOptionsMap) orderedKeys() []string {
+ var keys []string
+ for k := range t {
+ keys = append(keys, k)
+ }
+
+ sort.Slice(keys, func(a, b int) bool {
+ return t[keys[a]].order < t[keys[b]].order
+ })
+
+ return keys
+}
+
+type tagOption struct {
+ name string
+ customErrorMessage string
+ order int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+ "type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+ "type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of functions accept variants parameters
+var ParamTagMap = map[string]ParamValidator{
+ "length": ByteLength,
+ "range": Range,
+ "runelength": RuneLength,
+ "stringlength": StringLength,
+ "matches": StringMatches,
+ "in": IsInRaw,
+ "rsapub": IsRsaPub,
+ "minstringlength": MinStringLength,
+ "maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "in": regexp.MustCompile(`^in\((.*)\)`),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+ "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+ "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
+ "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+ validators map[string]CustomTypeValidator
+
+ sync.RWMutex
+}
+
+// Get looks up the custom validator registered under name; the bool reports
+// whether it exists. Guarded by the embedded RWMutex read lock, so it is safe
+// for concurrent use with Set.
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+ tm.RLock()
+ defer tm.RUnlock()
+ v, ok := tm.validators[name]
+ return v, ok
+}
+
+// Set registers (or silently replaces) the custom validator for name under the
+// embedded RWMutex write lock.
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+ tm.Lock()
+ defer tm.Unlock()
+ tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
+var TagMap = map[string]Validator{
+ "email": IsEmail,
+ "url": IsURL,
+ "dialstring": IsDialString,
+ "requrl": IsRequestURL,
+ "requri": IsRequestURI,
+ "alpha": IsAlpha,
+ "utfletter": IsUTFLetter,
+ "alphanum": IsAlphanumeric,
+ "utfletternum": IsUTFLetterNumeric,
+ "numeric": IsNumeric,
+ "utfnumeric": IsUTFNumeric,
+ "utfdigit": IsUTFDigit,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHexcolor,
+ "rgbcolor": IsRGBcolor,
+ "lowercase": IsLowerCase,
+ "uppercase": IsUpperCase,
+ "int": IsInt,
+ "float": IsFloat,
+ "null": IsNull,
+ "notnull": IsNotNull,
+ "uuid": IsUUID,
+ "uuidv3": IsUUIDv3,
+ "uuidv4": IsUUIDv4,
+ "uuidv5": IsUUIDv5,
+ "creditcard": IsCreditCard,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "json": IsJSON,
+ "multibyte": IsMultibyte,
+ "ascii": IsASCII,
+ "printableascii": IsPrintableASCII,
+ "fullwidth": IsFullWidth,
+ "halfwidth": IsHalfWidth,
+ "variablewidth": IsVariableWidth,
+ "base64": IsBase64,
+ "datauri": IsDataURI,
+ "ip": IsIP,
+ "port": IsPort,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "dns": IsDNSName,
+ "host": IsHost,
+ "mac": IsMAC,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "semver": IsSemver,
+ "rfc3339": IsRFC3339,
+ "rfc3339WithoutZone": IsRFC3339WithoutZone,
+ "ISO3166Alpha2": IsISO3166Alpha2,
+ "ISO3166Alpha3": IsISO3166Alpha3,
+ "ISO4217": IsISO4217,
+ "IMEI": IsIMEI,
+ "ulid": IsULID,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+ EnglishShortName string
+ FrenchShortName string
+ Alpha2Code string
+ Alpha3Code string
+ Numeric string
+}
+
+//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+ {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+ {"Albania", "Albanie (l')", "AL", "ALB", "008"},
+ {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+ {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
+ {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
+ {"Andorra", "Andorre (l')", "AD", "AND", "020"},
+ {"Angola", "Angola (l')", "AO", "AGO", "024"},
+ {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
+ {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
+ {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
+ {"Australia", "Australie (l')", "AU", "AUS", "036"},
+ {"Austria", "Autriche (l')", "AT", "AUT", "040"},
+ {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
+ {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
+ {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
+ {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
+ {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
+ {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
+ {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
+ {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
+ {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
+ {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
+ {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
+ {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
+ {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
+ {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
+ {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
+ {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
+ {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
+ {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
+ {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
+ {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
+ {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
+ {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
+ {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
+ {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
+ {"Canada", "Canada (le)", "CA", "CAN", "124"},
+ {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
+ {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
+ {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
+ {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
+ {"Chad", "Tchad (le)", "TD", "TCD", "148"},
+ {"Chile", "Chili (le)", "CL", "CHL", "152"},
+ {"China", "Chine (la)", "CN", "CHN", "156"},
+ {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
+ {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
+ {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
+ {"Colombia", "Colombie (la)", "CO", "COL", "170"},
+ {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
+ {"Mayotte", "Mayotte", "YT", "MYT", "175"},
+ {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
+ {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
+ {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
+ {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
+ {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
+ {"Cuba", "Cuba", "CU", "CUB", "192"},
+ {"Cyprus", "Chypre", "CY", "CYP", "196"},
+ {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
+ {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
+ {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
+ {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
+ {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
+ {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
+ {"El Salvador", "El Salvador", "SV", "SLV", "222"},
+ {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
+ {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
+ {"Estonia", "Estonie (l')", "EE", "EST", "233"},
+ {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
+ {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
+ {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
+ {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
+ {"Finland", "Finlande (la)", "FI", "FIN", "246"},
+ {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
+ {"France", "France (la)", "FR", "FRA", "250"},
+ {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
+ {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
+ {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
+ {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
+ {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
+ {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
+ {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
+ {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
+ {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
+ {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
+ {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
+ {"Kiribati", "Kiribati", "KI", "KIR", "296"},
+ {"Greece", "Grèce (la)", "GR", "GRC", "300"},
+ {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
+ {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
+ {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
+ {"Guam", "Guam", "GU", "GUM", "316"},
+ {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
+ {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
+ {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
+ {"Haiti", "Haïti", "HT", "HTI", "332"},
+ {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
+ {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
+ {"Honduras", "Honduras (le)", "HN", "HND", "340"},
+ {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
+ {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
+ {"Iceland", "Islande (l')", "IS", "ISL", "352"},
+ {"India", "Inde (l')", "IN", "IND", "356"},
+ {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
+ {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
+ {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
+ {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
+ {"Israel", "Israël", "IL", "ISR", "376"},
+ {"Italy", "Italie (l')", "IT", "ITA", "380"},
+ {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
+ {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
+ {"Japan", "Japon (le)", "JP", "JPN", "392"},
+ {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
+ {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
+ {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
+ {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
+ {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
+ {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
+ {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
+ {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
+ {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
+ {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
+ {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
+ {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
+ {"Libya", "Libye (la)", "LY", "LBY", "434"},
+ {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
+ {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
+ {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
+ {"Macao", "Macao", "MO", "MAC", "446"},
+ {"Madagascar", "Madagascar", "MG", "MDG", "450"},
+ {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
+ {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
+ {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
+ {"Mali", "Mali (le)", "ML", "MLI", "466"},
+ {"Malta", "Malte", "MT", "MLT", "470"},
+ {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
+ {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
+ {"Mauritius", "Maurice", "MU", "MUS", "480"},
+ {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
+ {"Monaco", "Monaco", "MC", "MCO", "492"},
+ {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
+ {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
+ {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
+ {"Montserrat", "Montserrat", "MS", "MSR", "500"},
+ {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
+ {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
+ {"Oman", "Oman", "OM", "OMN", "512"},
+ {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
+ {"Nauru", "Nauru", "NR", "NRU", "520"},
+ {"Nepal", "Népal (le)", "NP", "NPL", "524"},
+ {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
+ {"Curaçao", "Curaçao", "CW", "CUW", "531"},
+ {"Aruba", "Aruba", "AW", "ABW", "533"},
+ {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
+ {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
+ {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
+ {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
+ {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
+ {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
+ {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
+ {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
+ {"Niue", "Niue", "NU", "NIU", "570"},
+ {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
+ {"Norway", "Norvège (la)", "NO", "NOR", "578"},
+ {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
+ {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
+ {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
+ {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
+ {"Palau", "Palaos (les)", "PW", "PLW", "585"},
+ {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
+ {"Panama", "Panama (le)", "PA", "PAN", "591"},
+ {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
+ {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
+ {"Peru", "Pérou (le)", "PE", "PER", "604"},
+ {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
+ {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
+ {"Poland", "Pologne (la)", "PL", "POL", "616"},
+ {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
+ {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
+ {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
+ {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
+ {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
+ {"Réunion", "Réunion (La)", "RE", "REU", "638"},
+ {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
+ {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
+ {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
+ {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
+ {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
+ {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
+ {"Anguilla", "Anguilla", "AI", "AIA", "660"},
+ {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
+ {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
+ {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
+ {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
+ {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
+ {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
+ {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
+ {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
+ {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
+ {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
+ {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
+ {"Singapore", "Singapour", "SG", "SGP", "702"},
+ {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
+ {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
+ {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
+ {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
+ {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
+ {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
+ {"Spain", "Espagne (l')", "ES", "ESP", "724"},
+ {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
+ {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
+ {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
+ {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
+ {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
+ {"Sweden", "Suède (la)", "SE", "SWE", "752"},
+ {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
+ {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
+ {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
+ {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
+ {"Togo", "Togo (le)", "TG", "TGO", "768"},
+ {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
+ {"Tonga", "Tonga (les)", "TO", "TON", "776"},
+ {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
+ {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
+ {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
+ {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
+ {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
+ {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
+ {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
+ {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
+ {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
+ {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
+ {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
+ {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
+ {"Guernsey", "Guernesey", "GG", "GGY", "831"},
+ {"Jersey", "Jersey", "JE", "JEY", "832"},
+ {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
+ {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
+ {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
+ {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
+ {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
+ {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
+ {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
+ {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
+ {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
+ {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
+ {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
+ {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
+}
+
+// ISO4217List is the list of ISO currency codes
+var ISO4217List = []string{
+ "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
+ "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
+ "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
+ "DJF", "DKK", "DOP", "DZD",
+ "EGP", "ERN", "ETB", "EUR",
+ "FJD", "FKP",
+ "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
+ "HKD", "HNL", "HRK", "HTG", "HUF",
+ "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
+ "JMD", "JOD", "JPY",
+ "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
+ "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
+ "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
+ "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
+ "OMR",
+ "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
+ "QAR",
+ "RON", "RSD", "RUB", "RWF",
+ "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
+ "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
+ "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
+ "VEF", "VES", "VND", "VUV",
+ "WST",
+ "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
+ "YER",
+ "ZAR", "ZMW", "ZWL",
+}
+
+// ISO693Entry stores ISO language codes
+type ISO693Entry struct {
+ Alpha3bCode string
+ Alpha2Code string
+ English string
+}
+
+//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
+var ISO693List = []ISO693Entry{
+ {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
+ {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
+ {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
+ {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
+ {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
+ {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
+ {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
+ {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
+ {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
+ {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
+ {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
+ {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
+ {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
+ {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
+ {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
+ {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
+ {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
+ {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
+ {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
+ {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
+ {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
+ {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
+ {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
+ {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
+ {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
+ {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
+ {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
+ {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
+ {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
+ {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
+ {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
+ {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
+ {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
+ {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
+ {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
+ {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
+ {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
+ {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
+ {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
+ {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
+ {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
+ {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
+ {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
+ {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
+ {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
+ {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
+ {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
+ {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
+ {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
+ {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
+ {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
+ {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
+ {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
+ {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
+ {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
+ {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
+ {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
+ {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
+ {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
+ {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
+ {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
+ {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
+ {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
+ {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
+ {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
+ {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
+ {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
+ {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
+ {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
+ {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
+ {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
+ {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
+ {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
+ {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
+ {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
+ {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
+ {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
+ {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
+ {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
+ {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
+ {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
+ {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
+ {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
+ {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
+ {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
+ {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
+ {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
+ {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
+ {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
+ {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
+ {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
+ {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
+ {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
+ {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
+ {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
+ {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
+ {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
+ {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
+ {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
+ {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
+ {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
+ {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
+ {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
+ {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
+ {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
+ {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
+ {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
+ {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
+ {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
+ {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
+ {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
+ {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
+ {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
+ {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
+ {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
+ {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
+ {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
+ {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
+ {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
+ {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
+ {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
+ {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
+ {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
+ {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
+ {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
+ {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
+ {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
+ {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
+ {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
+ {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
+ {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
+ {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
+ {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
+ {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
+ {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
+ {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
+ {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
+ {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
+ {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
+ {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
+ {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
+ {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
+ {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
+ {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
+ {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
+ {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
+ {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
+ {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
+ {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
+ {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
+ {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
+ {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
+ {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
+ {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
+ {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
+ {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
+ {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
+ {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
+ {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
+ {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
+ {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
+ {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
+ {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
+ {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
+ {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
+ {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
+ {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
+ {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
+ {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
+ {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
+ {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
+ {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
+ {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
+ {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
+ {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
+ {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
+ {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
+ {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
+ {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
+ {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
+ {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
+ {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
+ {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
+ {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
+}
diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 00000000..f4c30f82
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,270 @@
+package govalidator
+
+import (
+ "errors"
+ "fmt"
+ "html"
+ "math"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+ return strings.Contains(str, substring)
+}
+
+// Matches checks if string matches the pattern (pattern is regular expression)
+// In case of error return false
+func Matches(str, pattern string) bool {
+ match, _ := regexp.MatchString(pattern, str)
+ return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If second argument is empty, it will remove leading spaces.
+func LeftTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimLeftFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("^[" + chars + "]+")
+ return r.ReplaceAllString(str, "")
+}
+
+// RightTrim trims characters from the right side of the input.
+// If second argument is empty, it will remove trailing spaces.
+func RightTrim(str, chars string) string {
+ if chars == "" {
+ return strings.TrimRightFunc(str, unicode.IsSpace)
+ }
+ r, _ := regexp.Compile("[" + chars + "]+$")
+ return r.ReplaceAllString(str, "")
+}
+
+// Trim trims characters from both sides of the input.
+// If second argument is empty, it will remove spaces.
+func Trim(str, chars string) string {
+ return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+ pattern := "[^" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+ pattern := "[" + chars + "]+"
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32 and 127, mostly control characters.
+// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+ chars := ""
+ if keepNewLines {
+ chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+ } else {
+ chars = "\x00-\x1F\x7F"
+ }
+ return BlackList(str, chars)
+}
+
+// ReplacePattern replaces regular expression pattern in string
+func ReplacePattern(str, pattern, replace string) string {
+ r, _ := regexp.Compile(pattern)
+ return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+ if len(segment) == 0 {
+ return inrune
+ }
+ if len(inrune) != 0 {
+ inrune = append(inrune, '_')
+ }
+ inrune = append(inrune, segment...)
+ return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+ return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+ var output []rune
+ var segment []rune
+ for _, r := range str {
+
+ // not treat number as separate segment
+ if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+ output = addSegment(output, segment)
+ segment = nil
+ }
+ segment = append(segment, unicode.ToLower(r))
+ }
+ output = addSegment(output, segment)
+ return string(output)
+}
+
+// Reverse returns reversed string
+func Reverse(s string) string {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r)
+}
+
+// GetLines splits string by "\n" and return array of lines
+func GetLines(s string) []string {
+ return strings.Split(s, "\n")
+}
+
+// GetLine returns specified line of multiline string
+func GetLine(s string, index int) (string, error) {
+ lines := GetLines(s)
+ if index < 0 || index >= len(lines) {
+ return "", errors.New("line index out of bounds")
+ }
+ return lines[index], nil
+}
+
+// RemoveTags removes all tags from HTML string
+func RemoveTags(s string) string {
+ return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns safe string that can be used in file names
+func SafeFileName(str string) string {
+ name := strings.ToLower(str)
+ name = path.Clean(path.Base(name))
+ name = strings.Trim(name, " ")
+ separators, err := regexp.Compile(`[ &_=+:]`)
+ if err == nil {
+ name = separators.ReplaceAllString(name, "-")
+ }
+ legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+ if err == nil {
+ name = legal.ReplaceAllString(name, "")
+ }
+ for strings.Contains(name, "--") {
+ name = strings.Replace(name, "--", "-", -1)
+ }
+ return name
+}
+
+// NormalizeEmail canonicalize an email address.
+// The local part of the email address is lowercased for all domains; the hostname is always lowercased and
+// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail).
+// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
+// normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+ if !IsEmail(str) {
+ return "", fmt.Errorf("%s is not an email", str)
+ }
+ parts := strings.Split(str, "@")
+ parts[0] = strings.ToLower(parts[0])
+ parts[1] = strings.ToLower(parts[1])
+ if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+ parts[1] = "gmail.com"
+ parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+ }
+ return strings.Join(parts, "@"), nil
+}
+
+// Truncate a string to the closest length without breaking words.
+func Truncate(str string, length int, ending string) string {
+ var aftstr, befstr string
+ if len(str) > length {
+ words := strings.Fields(str)
+ before, present := 0, 0
+ for i := range words {
+ befstr = aftstr
+ before = present
+ aftstr = aftstr + words[i] + " "
+ present = len(aftstr)
+ if present > length && i != 0 {
+ if (length - before) < (present - length) {
+ return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+ }
+ }
+ }
+
+ return str
+}
+
+// PadLeft pads left side of a string if size of string is less then indicated pad length
+func PadLeft(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads right side of a string if size of string is less then indicated pad length
+func PadRight(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of a string if size of string is less then indicated pad length
+func PadBoth(str string, padStr string, padLen int) string {
+ return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// PadString either left, right or both sides.
+// Note that padding string can be unicode and more then one character
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+ // When padded length is less then the current string size
+ if padLen < utf8.RuneCountInString(str) {
+ return str
+ }
+
+ padLen -= utf8.RuneCountInString(str)
+
+ targetLen := padLen
+
+ targetLenLeft := targetLen
+ targetLenRight := targetLen
+ if padLeft && padRight {
+ targetLenLeft = padLen / 2
+ targetLenRight = padLen - targetLenLeft
+ }
+
+ strToRepeatLen := utf8.RuneCountInString(padStr)
+
+ repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+ repeatedString := strings.Repeat(padStr, repeatTimes)
+
+ leftSide := ""
+ if padLeft {
+ leftSide = repeatedString[0:targetLenLeft]
+ }
+
+ rightSide := ""
+ if padRight {
+ rightSide = repeatedString[0:targetLenRight]
+ }
+
+ return leftSide + str + rightSide
+}
+
+// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object
+func TruncatingErrorf(str string, args ...interface{}) error {
+ n := strings.Count(str, "%s")
+ return fmt.Errorf(str, args[:n]...)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 00000000..c9c4fac0
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1768 @@
+// Package govalidator is package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ fieldsRequiredByDefault bool
+ nilPtrAllowedByRequired = false
+ notNumberRegexp = regexp.MustCompile("[^0-9]+")
+ whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
+ paramsRegexp = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+const rfc3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+// type exampleStruct struct {
+// Name string ``
+// Email string `valid:"email"`
+// This, however, will only fail when Email is empty or an invalid email address:
+// type exampleStruct2 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email"`
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+// type exampleStruct2 struct {
+// Name string `valid:"-"`
+// Email string `valid:"email,optional"`
+func SetFieldsRequiredByDefault(value bool) {
+ fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+// type exampleStruct struct {
+// Name *string `valid:"required"`
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+ nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+ // TODO uppercase letters are not supported
+ return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email of existing domain
+func IsExistingEmail(email string) bool {
+
+ if len(email) < 6 || len(email) > 254 {
+ return false
+ }
+ at := strings.LastIndex(email, "@")
+ if at <= 0 || at > len(email)-3 {
+ return false
+ }
+ user := email[:at]
+ host := email[at+1:]
+ if len(user) > 64 {
+ return false
+ }
+ switch host {
+ case "localhost", "example.com":
+ return true
+ }
+ if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+ return false
+ }
+ if _, err := net.LookupMX(host); err != nil {
+ if _, err := net.LookupIP(host); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsURL checks if the string is an URL.
+func IsURL(str string) bool {
+ if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+ return false
+ }
+ strTemp := str
+ if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+ // support no indicated urlscheme but with colon for port number
+ // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+ strTemp = "http://" + str
+ }
+ u, err := url.Parse(strTemp)
+ if err != nil {
+ return false
+ }
+ if strings.HasPrefix(u.Host, ".") {
+ return false
+ }
+ if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+ return false
+ }
+ return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+ url, err := url.ParseRequestURI(rawurl)
+ if err != nil {
+ return false //Couldn't even parse the rawurl
+ }
+ if len(url.Scheme) == 0 {
+ return false //No Scheme found
+ }
+ return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+ _, err := url.ParseRequestURI(rawurl)
+ return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+
+ for _, c := range str {
+ if !unicode.IsLetter(c) {
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ for _, c := range str {
+ if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsNumber(c) { //numbers && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ if strings.IndexAny(str, "+-") > 0 {
+ return false
+ }
+ if len(str) > 1 {
+ str = strings.TrimPrefix(str, "-")
+ str = strings.TrimPrefix(str, "+")
+ }
+ for _, c := range str {
+ if !unicode.IsDigit(c) { //digits && minus sign are ok
+ return false
+ }
+ }
+ return true
+
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+ return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+ return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+ return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
+func HasLowerCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid.
+func HasUpperCase(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+ return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If second argument is not valid integer or zero, it returns false.
+// Otherwise, if first argument is not valid integer or zero, it returns true (Invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+ f, _ := ToFloat(str)
+ p := int64(f)
+ q, _ := ToInt(num)
+ if q == 0 {
+ return false
+ }
+ return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null.
+func IsNull(str string) bool {
+ return len(str) == 0
+}
+
+// IsNotNull checks if the string is not null.
+func IsNotNull(str string) bool {
+ return !IsNull(str)
+}
+
+// HasWhitespaceOnly checks the string only contains whitespace
+func HasWhitespaceOnly(str string) bool {
+ return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace
+func HasWhitespace(str string) bool {
+ return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+ return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+ return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+ return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+ return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+ return rxUUID.MatchString(str)
+}
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as sentinel value for invalid indexes.
+var ulidDec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// EncodedSize is the length of a text encoded ULID.
+const ulidEncodedSize = 26
+
+// IsULID checks if the string is a ULID.
+//
+// Implementation got from:
+// https://github.com/oklog/ulid (Apache-2.0 License)
+//
+func IsULID(str string) bool {
+ // Check if a base32 encoded ULID is the right length.
+ if len(str) != ulidEncodedSize {
+ return false
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if ulidDec[str[0]] == 0xFF ||
+ ulidDec[str[1]] == 0xFF ||
+ ulidDec[str[2]] == 0xFF ||
+ ulidDec[str[3]] == 0xFF ||
+ ulidDec[str[4]] == 0xFF ||
+ ulidDec[str[5]] == 0xFF ||
+ ulidDec[str[6]] == 0xFF ||
+ ulidDec[str[7]] == 0xFF ||
+ ulidDec[str[8]] == 0xFF ||
+ ulidDec[str[9]] == 0xFF ||
+ ulidDec[str[10]] == 0xFF ||
+ ulidDec[str[11]] == 0xFF ||
+ ulidDec[str[12]] == 0xFF ||
+ ulidDec[str[13]] == 0xFF ||
+ ulidDec[str[14]] == 0xFF ||
+ ulidDec[str[15]] == 0xFF ||
+ ulidDec[str[16]] == 0xFF ||
+ ulidDec[str[17]] == 0xFF ||
+ ulidDec[str[18]] == 0xFF ||
+ ulidDec[str[19]] == 0xFF ||
+ ulidDec[str[20]] == 0xFF ||
+ ulidDec[str[21]] == 0xFF ||
+ ulidDec[str[22]] == 0xFF ||
+ ulidDec[str[23]] == 0xFF ||
+ ulidDec[str[24]] == 0xFF ||
+ ulidDec[str[25]] == 0xFF {
+ return false
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if str[0] > '7' {
+ return false
+ }
+ return true
+}
+
+// IsCreditCard checks if the string is a credit card.
+func IsCreditCard(str string) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ if !rxCreditCard.MatchString(sanitized) {
+ return false
+ }
+
+ number, _ := ToInt(sanitized)
+ number, lastDigit := number / 10, number % 10
+
+ var sum int64
+ for i:=0; number > 0; i++ {
+ digit := number % 10
+
+ if i % 2 == 0 {
+ digit *= 2
+ if digit > 9 {
+ digit -= 9
+ }
+ }
+
+ sum += digit
+ number = number / 10
+ }
+
+ return (sum + lastDigit) % 10 == 0
+}
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+ return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+ return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If version value is not equal to 10 or 13, it will check both variants.
+func IsISBN(str string, version int) bool {
+ sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ var checksum int32
+ var i int32
+ if version == 10 {
+ if !rxISBN10.MatchString(sanitized) {
+ return false
+ }
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(sanitized[i]-'0')
+ }
+ if sanitized[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(sanitized[9]-'0')
+ }
+ if checksum%11 == 0 {
+ return true
+ }
+ return false
+ } else if version == 13 {
+ if !rxISBN13.MatchString(sanitized) {
+ return false
+ }
+ factor := []int32{1, 3}
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(sanitized[i]-'0')
+ }
+ return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+ }
+ return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+ var js json.RawMessage
+ return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+ if IsNull(str) {
+ return true
+ }
+ return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+ return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Win or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+ if rxWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false, Win
+ }
+ return true, Win
+ } else if rxUnixPath.MatchString(str) {
+ return true, Unix
+ }
+ return false, Unknown
+}
+
+// IsWinFilePath checks both relative & absolute paths in Windows
+func IsWinFilePath(str string) bool {
+ if rxARWinPath.MatchString(str) {
+ //check windows path limit see:
+ // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+ if len(str[3:]) > 32767 {
+ return false
+ }
+ return true
+ }
+ return false
+}
+
+// IsUnixFilePath checks both relative & absolute paths in Unix
+func IsUnixFilePath(str string) bool {
+ if rxARUnixPath.MatchString(str) {
+ return true
+ }
+ return false
+}
+
+// IsDataURI checks if a string is base64 encoded data URI such as an image
+func IsDataURI(str string) bool {
+ dataURI := strings.Split(str, ",")
+ if !rxDataURI.MatchString(dataURI[0]) {
+ return false
+ }
+ return IsBase64(dataURI[1])
+}
+
+// IsMagnetURI checks if a string is valid magnet URI
+func IsMagnetURI(str string) bool {
+ return rxMagnetURI.MatchString(str)
+}
+
+// IsISO3166Alpha2 checks if a string is valid two-letter country code
+func IsISO3166Alpha2(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO3166Alpha3 checks if a string is valid three-letter country code
+func IsISO3166Alpha3(str string) bool {
+ for _, entry := range ISO3166List {
+ if str == entry.Alpha3Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha2 checks if a string is valid two-letter language code
+func IsISO693Alpha2(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha2Code {
+ return true
+ }
+ }
+ return false
+}
+
+// IsISO693Alpha3b checks if a string is valid three-letter language code
+func IsISO693Alpha3b(str string) bool {
+ for _, entry := range ISO693List {
+ if str == entry.Alpha3bCode {
+ return true
+ }
+ }
+ return false
+}
+
+// IsDNSName will validate the given string as a DNS name
+func IsDNSName(str string) bool {
+ if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+ // constraints already violated
+ return false
+ }
+ return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of type algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+ var len string
+ algo := strings.ToLower(algorithm)
+
+ if algo == "crc32" || algo == "crc32b" {
+ len = "8"
+ } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
+ len = "32"
+ } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
+ len = "40"
+ } else if algo == "tiger192" {
+ len = "48"
+ } else if algo == "sha3-224" {
+ len = "56"
+ } else if algo == "sha256" || algo == "sha3-256" {
+ len = "64"
+ } else if algo == "sha384" || algo == "sha3-384" {
+ len = "96"
+ } else if algo == "sha512" || algo == "sha3-512" {
+ len = "128"
+ } else {
+ return false
+ }
+
+ return Matches(str, "^[a-f0-9]{"+len+"}$")
+}
+
+// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
+func IsSHA3224(str string) bool {
+ return IsHash(str, "sha3-224")
+}
+
+// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
+func IsSHA3256(str string) bool {
+ return IsHash(str, "sha3-256")
+}
+
+// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
+func IsSHA3384(str string) bool {
+ return IsHash(str, "sha3-384")
+}
+
+// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
+func IsSHA3512(str string) bool {
+ return IsHash(str, "sha3-512")
+}
+
+// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
+func IsSHA512(str string) bool {
+ return IsHash(str, "sha512")
+}
+
+// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`
+func IsSHA384(str string) bool {
+ return IsHash(str, "sha384")
+}
+
+// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`
+func IsSHA256(str string) bool {
+ return IsHash(str, "sha256")
+}
+
+// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`
+func IsTiger192(str string) bool {
+ return IsHash(str, "tiger192")
+}
+
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
+func IsTiger160(str string) bool {
+ return IsHash(str, "tiger160")
+}
+
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
+func IsRipeMD160(str string) bool {
+ return IsHash(str, "ripemd160")
+}
+
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
+func IsSHA1(str string) bool {
+ return IsHash(str, "sha1")
+}
+
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
+func IsTiger128(str string) bool {
+ return IsHash(str, "tiger128")
+}
+
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
+func IsRipeMD128(str string) bool {
+ return IsHash(str, "ripemd128")
+}
+
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
+func IsCRC32(str string) bool {
+ return IsHash(str, "crc32")
+}
+
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
+func IsCRC32b(str string) bool {
+ return IsHash(str, "crc32b")
+}
+
+// IsMD5 checks if a string is a MD5 hash. Alias for `IsHash(str, "md5")`
+func IsMD5(str string) bool {
+ return IsHash(str, "md5")
+}
+
+// IsMD4 checks if a string is a MD4 hash. Alias for `IsHash(str, "md4")`
+func IsMD4(str string) bool {
+ return IsHash(str, "md4")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+ if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+ return true
+ }
+
+ return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
+func IsIP(str string) bool {
+ return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+ if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+ return true
+ }
+ return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+ ip := net.ParseIP(str)
+ return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func IsCIDR(str string) bool {
+ _, _, err := net.ParseCIDR(str)
+ return err == nil
+}
+
+// IsMAC checks if a string is valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+ _, err := net.ParseMAC(str)
+ return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+ return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+ return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is valid latitude.
+func IsLatitude(str string) bool {
+ return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is valid longitude.
+func IsLongitude(str string) bool {
+ return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is valid IMEI
+func IsIMEI(str string) bool {
+ return rxIMEI.MatchString(str)
+}
+
+// IsIMSI checks if a string is valid IMSI
+func IsIMSI(str string) bool {
+ if !rxIMSI.MatchString(str) {
+ return false
+ }
+
+ mcc, err := strconv.ParseInt(str[0:3], 10, 32)
+ if err != nil {
+ return false
+ }
+
+ switch mcc {
+ case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
+ case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
+ case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
+ case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
+ case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
+ case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
+ case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
+ case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
+ case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
+ case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
+ case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
+ case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
+ case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
+ case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
+ case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
+ case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
+ case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
+ case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
+ case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
+ case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
+ case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
+ case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
+ case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
+ case 738, 740, 742, 744, 746, 748, 750, 995:
+ return true
+ default:
+ return false
+ }
+ return true
+}
+
+// IsRsaPublicKey checks if a string is valid public key with provided length
+func IsRsaPublicKey(str string, keylen int) bool {
+ bb := bytes.NewBufferString(str)
+ pemBytes, err := ioutil.ReadAll(bb)
+ if err != nil {
+ return false
+ }
+ block, _ := pem.Decode(pemBytes)
+ if block != nil && block.Type != "PUBLIC KEY" {
+ return false
+ }
+ var der []byte
+
+ if block != nil {
+ der = block.Bytes
+ } else {
+ der, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return false
+ }
+ }
+
+ key, err := x509.ParsePKIXPublicKey(der)
+ if err != nil {
+ return false
+ }
+ pubkey, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return false
+ }
+ bitlen := len(pubkey.N.Bytes()) * 8
+ return bitlen == int(keylen)
+}
+
+// IsRegex checks if a give string is a valid regex with RE2 syntax or not
+func IsRegex(str string) bool {
+ if _, err := regexp.Compile(str); err == nil {
+ return true
+ }
+ return false
+}
+
+func toJSONName(tag string) string {
+ if tag == "" {
+ return ""
+ }
+
+ // JSON name always comes first. If there's no options then split[0] is
+ // JSON name, if JSON name is not set, then split[0] is an empty string.
+ split := strings.SplitN(tag, ",", 2)
+
+ name := split[0]
+
+ // However it is possible that the field is skipped when
+ // (de-)serializing from/to JSON, in which case assume that there is no
+ // tag name to use
+ if name == "-" {
+ return ""
+ }
+ return name
+}
+
+func prependPathToErrors(err error, path string) error {
+ switch err2 := err.(type) {
+ case Error:
+ err2.Path = append([]string{path}, err2.Path...)
+ return err2
+ case Errors:
+ errors := err2.Errors()
+ for i, err3 := range errors {
+ errors[i] = prependPathToErrors(err3, path)
+ }
+ return err2
+ }
+ return err
+}
+
+// ValidateArray performs validation according to condition iterator that validates every element of the array
+func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
+ return Every(array, iterator)
+}
+
+// ValidateMap use validation map for fields.
+// result will be equal to `false` if there are any errors.
+// s is the map containing the data to be validated.
+// m is the validation map in the form:
+// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
+func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ var errs Errors
+ var index int
+ val := reflect.ValueOf(s)
+ for key, value := range s {
+ presentResult := true
+ validator, ok := m[key]
+ if !ok {
+ presentResult = false
+ var err error
+ err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key)
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ valueField := reflect.ValueOf(value)
+ mapResult := true
+ typeResult := true
+ structResult := true
+ resultField := true
+ switch subValidator := validator.(type) {
+ case map[string]interface{}:
+ var err error
+ if v, ok := value.(map[string]interface{}); !ok {
+ mapResult = false
+ err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ } else {
+ mapResult, err = ValidateMap(v, subValidator)
+ if err != nil {
+ mapResult = false
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ case string:
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ subValidator != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err = typeCheck(valueField, reflect.StructField{
+ Name: key,
+ PkgPath: "",
+ Type: val.Type(),
+ Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
+ Offset: 0,
+ Index: []int{index},
+ Anonymous: false,
+ }, val, nil)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ case nil:
+ // already handlerd when checked before
+ default:
+ typeResult = false
+ err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
+ err = prependPathToErrors(err, key)
+ errs = append(errs, err)
+ }
+ result = result && presentResult && typeResult && resultField && structResult && mapResult
+ index++
+ }
+ // checks required keys
+ requiredResult := true
+ for key, value := range m {
+ if schema, ok := value.(string); ok {
+ tags := parseTagIntoMap(schema)
+ if required, ok := tags["required"]; ok {
+ if _, ok := s[key]; !ok {
+ requiredResult = false
+ if required.customErrorMessage != "" {
+ err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
+ } else {
+ err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
+ }
+ errs = append(errs, err)
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result && requiredResult, err
+}
+
+// ValidateStruct use tags for fields.
+// result will be equal to `false` if there are any errors.
+// todo currently there is no guarantee that errors will be returned in predictable order (tests may fail)
+func ValidateStruct(s interface{}) (bool, error) {
+ if s == nil {
+ return true, nil
+ }
+ result := true
+ var err error
+ val := reflect.ValueOf(s)
+ if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ // we only accept structs
+ if val.Kind() != reflect.Struct {
+ return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+ }
+ var errs Errors
+ for i := 0; i < val.NumField(); i++ {
+ valueField := val.Field(i)
+ typeField := val.Type().Field(i)
+ if typeField.PkgPath != "" {
+ continue // Private field
+ }
+ structResult := true
+ if valueField.Kind() == reflect.Interface {
+ valueField = valueField.Elem()
+ }
+ if (valueField.Kind() == reflect.Struct ||
+ (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+ typeField.Tag.Get(tagName) != "-" {
+ var err error
+ structResult, err = ValidateStruct(valueField.Interface())
+ if err != nil {
+ err = prependPathToErrors(err, typeField.Name)
+ errs = append(errs, err)
+ }
+ }
+ resultField, err2 := typeCheck(valueField, typeField, val, nil)
+ if err2 != nil {
+
+ // Replace structure name with JSON name if there is a tag on the variable
+ jsonTag := toJSONName(typeField.Tag.Get("json"))
+ if jsonTag != "" {
+ switch jsonError := err2.(type) {
+ case Error:
+ jsonError.Name = jsonTag
+ err2 = jsonError
+ case Errors:
+ for i2, err3 := range jsonError {
+ switch customErr := err3.(type) {
+ case Error:
+ customErr.Name = jsonTag
+ jsonError[i2] = customErr
+ }
+ }
+
+ err2 = jsonError
+ }
+ }
+
+ errs = append(errs, err2)
+ }
+ result = result && resultField && structResult
+ }
+ if len(errs) > 0 {
+ err = errs
+ }
+ return result, err
+}
+
+// ValidateStructAsync performs validation of the struct in a separate
+// goroutine and returns the results through the returned channels.
+// Both channels are buffered (capacity 1) so the goroutine never blocks on
+// send: it cannot leak if the caller reads only one of the channels, and it
+// cannot deadlock if the caller reads the error channel before the result
+// channel (an unbuffered version would hang in that order).
+func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
+	res := make(chan bool, 1)
+	errors := make(chan error, 1)
+
+	go func() {
+		defer close(res)
+		defer close(errors)
+
+		isValid, isFailed := ValidateStruct(s)
+
+		res <- isValid
+		errors <- isFailed
+	}()
+
+	return res, errors
+}
+
+// ValidateMapAsync performs validation of the map in a separate goroutine
+// and returns the results through the returned channels.
+// Both channels are buffered (capacity 1) so the goroutine never blocks on
+// send: it cannot leak if the caller reads only one of the channels, and it
+// cannot deadlock if the caller reads the error channel first.
+func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
+	res := make(chan bool, 1)
+	errors := make(chan error, 1)
+
+	go func() {
+		defer close(res)
+		defer close(errors)
+
+		isValid, isFailed := ValidateMap(s, m)
+
+		res <- isValid
+		errors <- isFailed
+	}()
+
+	return res, errors
+}
+
+// parseTagIntoMap parses a struct tag such as
+// `valid:"required~Some error message,length(2|3)"` into a tagOptionsMap:
+// {"required": {customErrorMessage: "Some error message", order: 0},
+//  "length(2|3)": {customErrorMessage: "", order: 1}}.
+// The index of each option is recorded so validators run in tag order.
+func parseTagIntoMap(tag string) tagOptionsMap {
+	optionsMap := make(tagOptionsMap)
+	options := strings.Split(tag, ",")
+
+	for i, option := range options {
+		option = strings.TrimSpace(option)
+
+		// Split into validator name and optional custom error message.
+		// SplitN keeps a literal '~' inside the custom message intact,
+		// whereas a plain Split would silently discard such messages.
+		validationOptions := strings.SplitN(option, "~", 2)
+		if !isValidTag(validationOptions[0]) {
+			continue
+		}
+		if len(validationOptions) == 2 {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+		} else {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+		}
+	}
+	return optionsMap
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// IsSSN validates the given string as a U.S. Social Security Number in the
+// dashed form "AAA-GG-SSSS" (exactly 11 characters).
+func IsSSN(str string) bool {
+	// The length check short-circuits the regex for inputs of the wrong
+	// size; an empty string fails it too, so no separate empty check is
+	// needed.
+	if len(str) != 11 {
+		return false
+	}
+	return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if the string is a valid semantic version
+// (https://semver.org), as matched by the package-level rxSemver pattern.
+func IsSemver(str string) bool {
+	return rxSemver.MatchString(str)
+}
+
+// IsType checks whether the dynamic type of v, as rendered by
+// reflect.TypeOf(v).String(), matches the single type name given in params.
+// All spaces are stripped from both sides before comparing; any param count
+// other than one yields false.
+func IsType(v interface{}, params ...string) bool {
+	if len(params) != 1 {
+		return false
+	}
+	normalize := func(s string) string {
+		return strings.Replace(s, " ", "", -1)
+	}
+	return normalize(reflect.TypeOf(v).String()) == normalize(params[0])
+}
+
+// IsTime checks if str parses successfully with the given time layout.
+func IsTime(str string, format string) bool {
+	if _, err := time.Parse(format, str); err != nil {
+		return false
+	}
+	return true
+}
+
+// IsUnixTime checks if str is a valid integer Unix timestamp: any value
+// parseable by strconv.Atoi, including negative (pre-epoch) values.
+func IsUnixTime(str string) bool {
+	_, err := strconv.Atoi(str)
+	return err == nil
+}
+
+// IsRFC3339 checks if str is a valid timestamp in the time.RFC3339 layout.
+func IsRFC3339(str string) bool {
+	return IsTime(str, time.RFC3339)
+}
+
+// IsRFC3339WithoutZone checks if str is a valid RFC 3339 timestamp with the
+// timezone portion omitted (layout held in the package-level rfc3339WithoutZone).
+func IsRFC3339WithoutZone(str string) bool {
+	return IsTime(str, rfc3339WithoutZone)
+}
+
+// IsISO4217 checks if str is a valid ISO 4217 currency code: a
+// case-sensitive, exact match against the package-level ISO4217List.
+func IsISO4217(str string) bool {
+	for i := range ISO4217List {
+		if ISO4217List[i] == str {
+			return true
+		}
+	}
+	return false
+}
+
+// ByteLength checks that the byte length of str is within [min, max], where
+// the two bounds come from exactly two params (parse failures are treated as
+// 0 by ToInt). Any other param count yields false.
+func ByteLength(str string, params ...string) bool {
+	if len(params) != 2 {
+		return false
+	}
+	min, _ := ToInt(params[0])
+	max, _ := ToInt(params[1])
+	n := len(str)
+	return n >= int(min) && n <= int(max)
+}
+
+// RuneLength checks the string's length in runes (multi-byte aware).
+// Alias for StringLength.
+func RuneLength(str string, params ...string) bool {
+	return StringLength(str, params...)
+}
+
+// IsRsaPub checks whether str is a valid RSA public key whose bit length is
+// given as the single param. Alias for IsRsaPublicKey.
+func IsRsaPub(str string, params ...string) bool {
+	if len(params) == 1 {
+		// Named keyLen rather than len so the builtin len is not shadowed.
+		keyLen, _ := ToInt(params[0])
+		return IsRsaPublicKey(str, int(keyLen))
+	}
+
+	return false
+}
+
+// StringMatches checks whether s matches the regular expression pattern
+// supplied as the single param; any other param count yields false.
+func StringMatches(s string, params ...string) bool {
+	if len(params) != 1 {
+		return false
+	}
+	return Matches(s, params[0])
+}
+
+// StringLength checks that the rune count of str (multi-byte aware) is
+// within [min, max], the two bounds taken from exactly two params.
+func StringLength(str string, params ...string) bool {
+	if len(params) != 2 {
+		return false
+	}
+	min, _ := ToInt(params[0])
+	max, _ := ToInt(params[1])
+	n := utf8.RuneCountInString(str)
+	return n >= int(min) && n <= int(max)
+}
+
+// MinStringLength checks that the rune count of str (multi-byte aware) is at
+// least the bound given as the single param; any other param count yields false.
+func MinStringLength(str string, params ...string) bool {
+	if len(params) != 1 {
+		return false
+	}
+	min, _ := ToInt(params[0])
+	return utf8.RuneCountInString(str) >= int(min)
+}
+
+// MaxStringLength checks that the rune count of str (multi-byte aware) is at
+// most the bound given as the single param; any other param count yields false.
+func MaxStringLength(str string, params ...string) bool {
+	if len(params) != 1 {
+		return false
+	}
+	max, _ := ToInt(params[0])
+	return utf8.RuneCountInString(str) <= int(max)
+}
+
+// Range checks that str, parsed as a float, lies within [min, max] taken
+// from exactly two params; unparseable values are treated as 0 by ToFloat.
+func Range(str string, params ...string) bool {
+	if len(params) != 2 {
+		return false
+	}
+	value, _ := ToFloat(str)
+	min, _ := ToFloat(params[0])
+	max, _ := ToFloat(params[1])
+	return InRange(value, min, max)
+}
+
+// IsInRaw checks if str is a member of the '|'-separated list supplied as
+// the single param, e.g. "red|green|blue".
+func IsInRaw(str string, params ...string) bool {
+	if len(params) != 1 {
+		return false
+	}
+	return IsIn(str, strings.Split(params[0], "|")...)
+}
+
+// IsIn checks if str equals any member of the set params.
+func IsIn(str string, params ...string) bool {
+	for i := range params {
+		if params[i] == str {
+			return true
+		}
+	}
+	return false
+}
+
+// checkRequired decides the outcome for an empty field value: the value is
+// valid unless the tag options demand "required" (or fields are required by
+// default and this one is not marked "optional"). A nil pointer/interface is
+// accepted when the package flag nilPtrAllowedByRequired is set.
+func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
+	if nilPtrAllowedByRequired {
+		k := v.Kind()
+		if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
+			return true, nil
+		}
+	}
+
+	requiredOption, isRequired := options["required"]
+	switch {
+	case isRequired && requiredOption.customErrorMessage != "":
+		return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
+	case isRequired:
+		return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
+	case fieldsRequiredByDefault:
+		if _, isOptional := options["optional"]; !isOptional {
+			return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
+		}
+	}
+	// not required and empty is valid
+	return true, nil
+}
+
+// typeCheck validates a single value v — struct field t of enclosing value o —
+// against the validator options parsed from its tag. When options is nil this
+// is the root call and the tag is parsed here; recursive calls for pointers,
+// slices, arrays and map values pass the already-parsed options through.
+// NOTE(review): options is consumed destructively — every recognised
+// validator is deleted from it so the root-level defer below can report any
+// leftover entries as unknown or inapplicable validators.
+func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
+	if !v.IsValid() {
+		return false, nil
+	}
+
+	tag := t.Tag.Get(tagName)
+
+	// checks if the field should be ignored
+	switch tag {
+	case "":
+		if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
+			if !fieldsRequiredByDefault {
+				return true, nil
+			}
+			return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
+		}
+	case "-":
+		return true, nil
+	}
+
+	isRootType := false
+	if options == nil {
+		isRootType = true
+		options = parseTagIntoMap(tag)
+	}
+
+	if isEmptyValue(v) {
+		// an empty value is not validated, checks only required
+		isValid, resultErr = checkRequired(v, t, options)
+		for key := range options {
+			delete(options, key)
+		}
+		return isValid, resultErr
+	}
+
+	// run any custom-type validators registered in CustomTypeTagMap first
+	var customTypeErrors Errors
+	optionsOrder := options.orderedKeys()
+	for _, validatorName := range optionsOrder {
+		validatorStruct := options[validatorName]
+		if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
+			delete(options, validatorName)
+
+			if result := validatefunc(v.Interface(), o.Interface()); !result {
+				if len(validatorStruct.customErrorMessage) > 0 {
+					customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
+					continue
+				}
+				customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
+			}
+		}
+	}
+
+	if len(customTypeErrors.Errors()) > 0 {
+		return false, customTypeErrors
+	}
+
+	if isRootType {
+		// Ensure that we've checked the value by all specified validators before report that the value is valid
+		defer func() {
+			delete(options, "optional")
+			delete(options, "required")
+
+			if isValid && resultErr == nil && len(options) != 0 {
+				optionsOrder := options.orderedKeys()
+				for _, validator := range optionsOrder {
+					isValid = false
+					resultErr = Error{t.Name, fmt.Errorf(
+						"The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
+					return
+				}
+			}
+		}()
+	}
+
+	for _, validatorSpec := range optionsOrder {
+		validatorStruct := options[validatorSpec]
+		var negate bool
+		validator := validatorSpec
+		customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+		// checks whether the tag looks like '!something' or 'something'
+		if validator[0] == '!' {
+			validator = validator[1:]
+			negate = true
+		}
+
+		// checks for interface param validators
+		for key, value := range InterfaceParamTagRegexMap {
+			ps := value.FindStringSubmatch(validator)
+			if len(ps) == 0 {
+				continue
+			}
+
+			validatefunc, ok := InterfaceParamTagMap[key]
+			if !ok {
+				continue
+			}
+
+			delete(options, validatorSpec)
+
+			field := fmt.Sprint(v)
+			if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) {
+				if customMsgExists {
+					return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+				}
+				if negate {
+					return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+				}
+				return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+			}
+		}
+	}
+
+	// dispatch on the value's kind; scalar kinds run tag validators, while
+	// container kinds recurse element-by-element
+	switch v.Kind() {
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Float32, reflect.Float64,
+		reflect.String:
+		// for each tag option checks the map of validator functions
+		for _, validatorSpec := range optionsOrder {
+			validatorStruct := options[validatorSpec]
+			var negate bool
+			validator := validatorSpec
+			customMsgExists := len(validatorStruct.customErrorMessage) > 0
+
+			// checks whether the tag looks like '!something' or 'something'
+			if validator[0] == '!' {
+				validator = validator[1:]
+				negate = true
+			}
+
+			// checks for param validators
+			for key, value := range ParamTagRegexMap {
+				ps := value.FindStringSubmatch(validator)
+				if len(ps) == 0 {
+					continue
+				}
+
+				validatefunc, ok := ParamTagMap[key]
+				if !ok {
+					continue
+				}
+
+				delete(options, validatorSpec)
+
+				switch v.Kind() {
+				case reflect.String,
+					reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+					reflect.Float32, reflect.Float64:
+
+					field := fmt.Sprint(v) // make value into string, then validate with regex
+					if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
+						if customMsgExists {
+							return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+						}
+						if negate {
+							return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+						}
+						return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+					}
+				default:
+					// type not yet supported, fail
+					return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
+				}
+			}
+
+			if validatefunc, ok := TagMap[validator]; ok {
+				delete(options, validatorSpec)
+
+				switch v.Kind() {
+				case reflect.String,
+					reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+					reflect.Float32, reflect.Float64:
+					field := fmt.Sprint(v) // make value into string, then validate with regex
+					if result := validatefunc(field); !result && !negate || result && negate {
+						if customMsgExists {
+							return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+						}
+						if negate {
+							return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+						}
+						return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
+					}
+				default:
+					//Not Yet Supported Types (Fail here!)
+					err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+					return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+				}
+			}
+		}
+		return true, nil
+	case reflect.Map:
+		// only maps with string keys are supported
+		if v.Type().Key().Kind() != reflect.String {
+			return false, &UnsupportedTypeError{v.Type()}
+		}
+		var sv stringValues
+		sv = v.MapKeys()
+		sort.Sort(sv)
+		result := true
+		for i, k := range sv {
+			var resultItem bool
+			var err error
+			if v.MapIndex(k).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+				if err != nil {
+					err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Slice, reflect.Array:
+		result := true
+		for i := 0; i < v.Len(); i++ {
+			var resultItem bool
+			var err error
+			if v.Index(i).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.Index(i), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.Index(i).Interface())
+				if err != nil {
+					err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Interface:
+		// If the value is an interface then encode its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return ValidateStruct(v.Interface())
+	case reflect.Ptr:
+		// If the value is a pointer then checks its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return typeCheck(v.Elem(), t, o, options)
+	case reflect.Struct:
+		return true, nil
+	default:
+		return false, &UnsupportedTypeError{v.Type()}
+	}
+}
+
+// stripParams removes a trailing parameter list from a validator name,
+// e.g. "length(2|3)" -> "length".
+func stripParams(validatorString string) string {
+	return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+// isEmptyValue reports whether v holds its type's empty/zero value:
+// zero-length strings/arrays, nil or zero-length maps/slices, false bools,
+// zero numbers, and nil pointers/interfaces. Any other kind falls back to a
+// deep comparison against the type's zero value.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String, reflect.Array:
+		return v.Len() == 0
+	case reflect.Map, reflect.Slice:
+		return v.IsNil() || v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error message for the specified field of a struct
+// validated by ValidateStruct, or the empty string if there are no errors or
+// the field does not exist / has no errors.
+func ErrorByField(e error, field string) string {
+	if e == nil {
+		return ""
+	}
+	return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns a map of field name -> error message for an error
+// produced by ValidateStruct, flattening nested Errors values recursively.
+// It returns an empty map when e is nil or of an unrecognised type.
+func ErrorsByField(e error) map[string]string {
+	m := make(map[string]string)
+	if e == nil {
+		return m
+	}
+	// prototype for ValidateStruct
+
+	switch e := e.(type) {
+	case Error:
+		m[e.Name] = e.Err.Error()
+	case Errors:
+		for _, item := range e.Errors() {
+			n := ErrorsByField(item)
+			for k, v := range n {
+				m[k] = v
+			}
+		}
+	}
+
+	return m
+}
+
+// Error implements the error interface, naming the unsupported reflect.Type.
+func (e *UnsupportedTypeError) Error() string {
+	return "validator: unsupported type: " + e.Type.String()
+}
+
+// stringValues implements sort.Interface so that map keys (reflect.Values of
+// kind String) can be sorted into a deterministic validation order.
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// IsE164 checks whether str is a valid E.164 international phone number
+// (e.g. "+14155552671"), as matched by the package-level rxE164 pattern.
+func IsE164(str string) bool {
+	return rxE164.MatchString(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml
new file mode 100644
index 00000000..bc5f7b08
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/wercker.yml
@@ -0,0 +1,15 @@
+box: golang
+build:
+ steps:
+ - setup-go-workspace
+
+ - script:
+ name: go get
+ code: |
+ go version
+ go get -t ./...
+
+ - script:
+ name: go test
+ code: |
+ go test -race -v ./...
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml b/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml
new file mode 100644
index 00000000..44bb8765
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/.golangci.yml
@@ -0,0 +1,37 @@
+version: "2"
+linters:
+ default: none
+ enable:
+ - errcheck
+ - gocritic
+ - gocyclo
+ - gosec
+ - govet
+ - ineffassign
+ - misspell
+ - promlinter
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS b/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS
new file mode 100644
index 00000000..88ca0ddd
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/AUTHORS
@@ -0,0 +1,6 @@
+Billy Lynch
+Bradley Falzon
+Philippe Modard
+Ricardo Chimal, Jr
+Tatsuya Kamohara <17017563+kamontia@users.noreply.github.com>
+rob boll
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE b/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE
new file mode 100644
index 00000000..1c508b07
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2019 ghinstallation AUTHORS
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md
new file mode 100644
index 00000000..cf5ea50b
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md
@@ -0,0 +1,110 @@
+# ghinstallation
+
+[](https://godoc.org/github.com/bradleyfalzon/ghinstallation/v2)
+
+`ghinstallation` provides `Transport`, which implements `http.RoundTripper` to
+provide authentication as an installation for GitHub Apps.
+
+This library is designed to provide automatic authentication for
+https://github.com/google/go-github or your own HTTP client.
+
+See
+https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+
+# Installation
+
+Get the package:
+
+```bash
+GO111MODULE=on go get -u github.com/bradleyfalzon/ghinstallation/v2
+```
+
+# GitHub Example
+
+```go
+import "github.com/bradleyfalzon/ghinstallation/v2"
+
+func main() {
+ // Shared transport to reuse TCP connections.
+ tr := http.DefaultTransport
+
+ // Wrap the shared transport for use with the app ID 1 authenticating with installation ID 99.
+ itr, err := ghinstallation.NewKeyFromFile(tr, 1, 99, "2016-10-19.private-key.pem")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Use installation transport with github.com/google/go-github
+ client := github.NewClient(&http.Client{Transport: itr})
+}
+```
+
+You can also use [`New()`](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#New) to load a key directly from a `[]byte`.
+
+# GitHub Enterprise Example
+
+For clients using GitHub Enterprise, set the base URL as follows:
+
+```go
+import "github.com/bradleyfalzon/ghinstallation/v2"
+
+const GitHubEnterpriseURL = "https://github.example.com/api/v3"
+
+func main() {
+ // Shared transport to reuse TCP connections.
+ tr := http.DefaultTransport
+
+ // Wrap the shared transport for use with the app ID 1 authenticating with installation ID 99.
+ itr, err := ghinstallation.NewKeyFromFile(tr, 1, 99, "2016-10-19.private-key.pem")
+ if err != nil {
+ log.Fatal(err)
+ }
+ itr.BaseURL = GitHubEnterpriseURL
+
+ // Use installation transport with github.com/google/go-github
+ client := github.NewEnterpriseClient(GitHubEnterpriseURL, GitHubEnterpriseURL, &http.Client{Transport: itr})
+}
+```
+
+## What is app ID and installation ID
+
+`app ID` is the GitHub App ID. \
+You can find it as follows: \
+Settings > Developer settings > GitHub Apps > About
+
+`installation ID` is part of the WebHook request payload. \
+You can find the number by inspecting the request. \
+Settings > Developer settings > GitHub Apps > Advanced > Payload in Request
+tab
+
+```
+WebHook request
+...
+ "installation": {
+ "id": `installation ID`
+ }
+```
+
+# Customizing signing behavior
+
+Users can customize signing behavior by passing in a
+[Signer](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#Signer)
+implementation when creating an
+[AppsTransport](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#AppsTransport).
+For example, this can be used to create tokens backed by keys in a KMS system.
+
+```go
+signer := &myCustomSigner{
+ key: "https://url/to/key/vault",
+}
+atr := NewAppsTransportWithOptions(http.DefaultTransport, 1, WithSigner(signer))
+tr := NewFromAppsTransport(atr, 99)
+```
+
+# License
+
+[Apache 2.0](LICENSE)
+
+# Dependencies
+
+- [github.com/golang-jwt/jwt](https://github.com/golang-jwt/jwt)
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go
new file mode 100644
index 00000000..ada64bcf
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go
@@ -0,0 +1,121 @@
+package ghinstallation
+
+import (
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ jwt "github.com/golang-jwt/jwt/v4"
+)
+
+// AppsTransport provides a http.RoundTripper by wrapping an existing
+// http.RoundTripper and provides GitHub Apps authentication as a
+// GitHub App.
+//
+// Client can also be overwritten, and is useful to change to one which
+// provides retry logic if you do experience retryable errors.
+//
+// See https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+type AppsTransport struct {
+ BaseURL string // BaseURL is the scheme and host for GitHub API, defaults to https://api.github.com
+ Client Client // Client to use to refresh tokens, defaults to http.Client with provided transport
+ tr http.RoundTripper // tr is the underlying roundtripper being wrapped
+ signer Signer // signer signs JWT tokens.
+ appID int64 // appID is the GitHub App's ID
+}
+
+// NewAppsTransportKeyFromFile returns a AppsTransport using a private key from file.
+func NewAppsTransportKeyFromFile(tr http.RoundTripper, appID int64, privateKeyFile string) (*AppsTransport, error) {
+ privateKey, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read private key: %s", err)
+ }
+ return NewAppsTransport(tr, appID, privateKey)
+}
+
+// NewAppsTransport returns a AppsTransport using private key. The key is parsed
+// and if any errors occur the error is non-nil.
+//
+// The provided tr http.RoundTripper should be shared between multiple
+// installations to ensure reuse of underlying TCP connections.
+//
+// The returned Transport's RoundTrip method is safe to be used concurrently.
+func NewAppsTransport(tr http.RoundTripper, appID int64, privateKey []byte) (*AppsTransport, error) {
+ key, err := jwt.ParseRSAPrivateKeyFromPEM(privateKey)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse private key: %s", err)
+ }
+ return NewAppsTransportFromPrivateKey(tr, appID, key), nil
+}
+
+// NewAppsTransportFromPrivateKey returns an AppsTransport using a crypto/rsa.(*PrivateKey).
+func NewAppsTransportFromPrivateKey(tr http.RoundTripper, appID int64, key *rsa.PrivateKey) *AppsTransport {
+ return &AppsTransport{
+ BaseURL: apiBaseURL,
+ Client: &http.Client{Transport: tr},
+ tr: tr,
+ signer: NewRSASigner(jwt.SigningMethodRS256, key),
+ appID: appID,
+ }
+}
+
+func NewAppsTransportWithOptions(tr http.RoundTripper, appID int64, opts ...AppsTransportOption) (*AppsTransport, error) {
+ t := &AppsTransport{
+ BaseURL: apiBaseURL,
+ Client: &http.Client{Transport: tr},
+ tr: tr,
+ appID: appID,
+ }
+ for _, fn := range opts {
+ fn(t)
+ }
+
+ if t.signer == nil {
+ return nil, errors.New("no signer provided")
+ }
+
+ return t, nil
+}
+
+// RoundTrip implements http.RoundTripper interface.
+func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // GitHub rejects expiry and issue timestamps that are not an integer,
+ // while the jwt-go library serializes to fractional timestamps.
+ // Truncate them before passing to jwt-go.
+ iss := time.Now().Add(-30 * time.Second).Truncate(time.Second)
+ exp := iss.Add(2 * time.Minute)
+ claims := &jwt.RegisteredClaims{
+ IssuedAt: jwt.NewNumericDate(iss),
+ ExpiresAt: jwt.NewNumericDate(exp),
+ Issuer: strconv.FormatInt(t.appID, 10),
+ }
+
+ ss, err := t.signer.Sign(claims)
+ if err != nil {
+ return nil, fmt.Errorf("could not sign jwt: %s", err)
+ }
+
+ req.Header.Set("Authorization", "Bearer "+ss)
+ req.Header.Add("Accept", acceptHeader)
+
+ resp, err := t.tr.RoundTrip(req)
+ return resp, err
+}
+
+// AppID returns the appID of the transport
+func (t *AppsTransport) AppID() int64 {
+ return t.appID
+}
+
+type AppsTransportOption func(*AppsTransport)
+
+// WithSigner configures the AppsTransport to use the given Signer for generating JWT tokens.
+func WithSigner(signer Signer) AppsTransportOption {
+ return func(at *AppsTransport) {
+ at.signer = signer
+ }
+}
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go
new file mode 100644
index 00000000..928e10ef
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go
@@ -0,0 +1,33 @@
+package ghinstallation
+
+import (
+ "crypto/rsa"
+
+ jwt "github.com/golang-jwt/jwt/v4"
+)
+
+// Signer is a JWT token signer. This is a wrapper around [jwt.SigningMethod] with predetermined
+// key material.
+type Signer interface {
+ // Sign signs the given claims and returns a JWT token string, as specified
+ // by [jwt.Token.SignedString]
+ Sign(claims jwt.Claims) (string, error)
+}
+
+// RSASigner signs JWT tokens using RSA keys.
+type RSASigner struct {
+ method *jwt.SigningMethodRSA
+ key *rsa.PrivateKey
+}
+
+func NewRSASigner(method *jwt.SigningMethodRSA, key *rsa.PrivateKey) *RSASigner {
+ return &RSASigner{
+ method: method,
+ key: key,
+ }
+}
+
+// Sign signs the JWT claims with the RSA key.
+func (s *RSASigner) Sign(claims jwt.Claims) (string, error) {
+ return jwt.NewWithClaims(s.method, claims).SignedString(s.key)
+}
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go
new file mode 100644
index 00000000..7794dd9b
--- /dev/null
+++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go
@@ -0,0 +1,276 @@
+package ghinstallation
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/google/go-github/v72/github"
+)
+
+const (
+ acceptHeader = "application/vnd.github.v3+json"
+ apiBaseURL = "https://api.github.com"
+)
+
+// Transport provides a http.RoundTripper by wrapping an existing
+// http.RoundTripper and provides GitHub Apps authentication as an
+// installation.
+//
+// Client can also be overwritten, and is useful to change to one which
+// provides retry logic if you do experience retryable errors.
+//
+// See https://developer.github.com/apps/building-integrations/setting-up-and-registering-github-apps/about-authentication-options-for-github-apps/
+type Transport struct {
+ BaseURL string // BaseURL is the scheme and host for GitHub API, defaults to https://api.github.com
+ Client Client // Client to use to refresh tokens, defaults to http.Client with provided transport
+ tr http.RoundTripper // tr is the underlying roundtripper being wrapped
+ appID int64 // appID is the GitHub App's ID
+ installationID int64 // installationID is the GitHub App Installation ID
+ InstallationTokenOptions *github.InstallationTokenOptions // parameters restrict a token's access
+ appsTransport *AppsTransport
+
+ mu *sync.Mutex // mu protects token
+ token *accessToken // token is the installation's access token
+}
+
+// accessToken is an installation access token response from GitHub
+type accessToken struct {
+ Token string `json:"token"`
+ ExpiresAt time.Time `json:"expires_at"`
+ Permissions github.InstallationPermissions `json:"permissions,omitempty"`
+ Repositories []github.Repository `json:"repositories,omitempty"`
+}
+
+// HTTPError represents a custom error for failing HTTP operations.
+// Example in our usecase: refresh access token operation.
+// It enables the caller to inspect the root cause and response.
+type HTTPError struct {
+ Message string
+ RootCause error
+ InstallationID int64
+ Response *http.Response
+}
+
+func (e *HTTPError) Error() string {
+ return e.Message
+}
+
+// Unwrap implements the standard library's error wrapping. It unwraps to the root cause.
+func (e *HTTPError) Unwrap() error {
+ return e.RootCause
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewKeyFromFile returns a Transport using a private key from file.
+func NewKeyFromFile(tr http.RoundTripper, appID, installationID int64, privateKeyFile string) (*Transport, error) {
+ privateKey, err := os.ReadFile(privateKeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read private key: %s", err)
+ }
+ return New(tr, appID, installationID, privateKey)
+}
+
+// Client is a HTTP client which sends a http.Request and returns a http.Response
+// or an error.
+type Client interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// New returns an Transport using private key. The key is parsed
+// and if any errors occur the error is non-nil.
+//
+// The provided tr http.RoundTripper should be shared between multiple
+// installations to ensure reuse of underlying TCP connections.
+//
+// The returned Transport's RoundTrip method is safe to be used concurrently.
+func New(tr http.RoundTripper, appID, installationID int64, privateKey []byte) (*Transport, error) {
+ atr, err := NewAppsTransport(tr, appID, privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewFromAppsTransport(atr, installationID), nil
+}
+
+// NewFromAppsTransport returns a Transport using an existing *AppsTransport.
+func NewFromAppsTransport(atr *AppsTransport, installationID int64) *Transport {
+ return &Transport{
+ BaseURL: atr.BaseURL,
+ Client: &http.Client{Transport: atr.tr},
+ tr: atr.tr,
+ appID: atr.appID,
+ installationID: installationID,
+ appsTransport: atr,
+ mu: &sync.Mutex{},
+ }
+}
+
+// RoundTrip implements http.RoundTripper interface.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
+ token, err := t.Token(req.Context())
+ if err != nil {
+ return nil, err
+ }
+
+ creq := cloneRequest(req) // per RoundTripper contract
+ creq.Header.Set("Authorization", "token "+token)
+
+ if creq.Header.Get("Accept") == "" { // We only add an "Accept" header to avoid overwriting the expected behavior.
+ creq.Header.Add("Accept", acceptHeader)
+ }
+ reqBodyClosed = true // req.Body is assumed to be closed by the tr RoundTripper.
+ resp, err := t.tr.RoundTrip(creq)
+ return resp, err
+}
+
+func (at *accessToken) getRefreshTime() time.Time {
+ return at.ExpiresAt.Add(-time.Minute)
+}
+
+func (at *accessToken) isExpired() bool {
+ return at == nil || at.getRefreshTime().Before(time.Now())
+}
+
+// Token checks the active token expiration and renews if necessary. Token returns
+// a valid access token. If renewal fails an error is returned.
+func (t *Transport) Token(ctx context.Context) (string, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.token.isExpired() {
+ // Token is not set or expired/nearly expired, so refresh
+ if err := t.refreshToken(ctx); err != nil {
+ return "", fmt.Errorf("could not refresh installation id %v's token: %w", t.installationID, err)
+ }
+ }
+
+ return t.token.Token, nil
+}
+
+// Permissions returns a transport token's GitHub installation permissions.
+func (t *Transport) Permissions() (github.InstallationPermissions, error) {
+ if t.token == nil {
+ return github.InstallationPermissions{}, fmt.Errorf("Permissions() = nil, err: nil token")
+ }
+ return t.token.Permissions, nil
+}
+
+// Repositories returns a transport token's GitHub repositories.
+func (t *Transport) Repositories() ([]github.Repository, error) {
+ if t.token == nil {
+ return nil, fmt.Errorf("Repositories() = nil, err: nil token")
+ }
+ return t.token.Repositories, nil
+}
+
+// Expiry returns a transport token's expiration time and refresh time. There is a small grace period
+// built in where a token will be refreshed before it expires. expiresAt is the actual token expiry,
+// and refreshAt is when a call to Token() will cause it to be refreshed.
+func (t *Transport) Expiry() (expiresAt time.Time, refreshAt time.Time, err error) {
+ if t.token == nil {
+ return time.Time{}, time.Time{}, errors.New("Expiry() = unknown, err: nil token")
+ }
+ return t.token.ExpiresAt, t.token.getRefreshTime(), nil
+}
+
+// AppID returns the app ID associated with the transport
+func (t *Transport) AppID() int64 {
+ return t.appID
+}
+
+// InstallationID returns the installation ID associated with the transport
+func (t *Transport) InstallationID() int64 {
+ return t.installationID
+}
+
+func (t *Transport) refreshToken(ctx context.Context) error {
+ // Convert InstallationTokenOptions into a ReadWriter to pass as an argument to http.NewRequest.
+ body, err := GetReadWriter(t.InstallationTokenOptions)
+ if err != nil {
+ return fmt.Errorf("could not convert installation token parameters into json: %s", err)
+ }
+
+ requestURL := fmt.Sprintf("%s/app/installations/%v/access_tokens", strings.TrimRight(t.BaseURL, "/"), t.installationID)
+ req, err := http.NewRequest("POST", requestURL, body)
+ if err != nil {
+ return fmt.Errorf("could not create request: %s", err)
+ }
+
+ // Set Content and Accept headers.
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ req.Header.Set("Accept", acceptHeader)
+
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
+
+ t.appsTransport.BaseURL = t.BaseURL
+ t.appsTransport.Client = t.Client
+ resp, err := t.appsTransport.RoundTrip(req)
+ e := &HTTPError{
+ RootCause: err,
+ InstallationID: t.installationID,
+ Response: resp,
+ }
+ if err != nil {
+ e.Message = fmt.Sprintf("could not get access_tokens from GitHub API for installation ID %v: %v", t.installationID, err)
+ return e
+ }
+
+ if resp.StatusCode/100 != 2 {
+ e.Message = fmt.Sprintf("received non 2xx response status %q when fetching %v", resp.Status, req.URL)
+ return e
+ }
+ // Closing body late, to provide caller a chance to inspect body in an error / non-200 response status situation
+ defer resp.Body.Close()
+
+ return json.NewDecoder(resp.Body).Decode(&t.token)
+}
+
+// GetReadWriter converts a body interface into an io.ReadWriter object.
+func GetReadWriter(i interface{}) (io.ReadWriter, error) {
+ var buf io.ReadWriter
+ if i != nil {
+ buf = new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ err := enc.Encode(i)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return buf, nil
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b7..33c88305 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c..78bddf1c 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a4..78f95f25 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13bb..118e49e8 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5f..05f5e7df 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638fd..cf9d42ae 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/cloudbase/garm-provider-common/LICENSE b/vendor/github.com/cloudbase/garm-provider-common/LICENSE
new file mode 100644
index 00000000..56ceea9b
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 Cloudbase Solutions SRL
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go b/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go
new file mode 100644
index 00000000..0feda5e3
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/defaults/defaults.go
@@ -0,0 +1,31 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package defaults
+
+const (
+ // DefaultUser is the default username that should exist on the instances.
+ DefaultUser = "runner"
+ // DefaultUserShell is the shell for the default user.
+ DefaultUserShell = "/bin/bash"
+)
+
+var (
+ // DefaultUserGroups are the groups the default user will be part of.
+ DefaultUserGroups = []string{
+ "sudo", "adm", "cdrom", "dialout",
+ "dip", "video", "plugdev", "netdev",
+ "docker", "lxd",
+ }
+)
diff --git a/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go b/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go
new file mode 100644
index 00000000..76e85d9c
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/errors/errors.go
@@ -0,0 +1,273 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package errors
+
+import "fmt"
+
+var (
+	// ErrUnauthorized is returned when a user does not have
+	// authorization to perform a request
+	ErrUnauthorized = NewUnauthorizedError("Unauthorized")
+	// ErrNotFound is returned if an object is not found in
+	// the database.
+	ErrNotFound = NewNotFoundError("not found")
+	// ErrDuplicateEntity is returned when creating an entity,
+	// if that entity already exists.
+	ErrDuplicateEntity = NewDuplicateUserError("duplicate")
+	// ErrBadRequest is returned if a malformed request is sent
+	ErrBadRequest = NewBadRequestError("invalid request")
+	// ErrTimeout is returned when a timeout occurs.
+	ErrTimeout = NewTimeoutError("timed out")
+	ErrUnprocessable = NewUnprocessableError("cannot process request") // returned when a request cannot be processed
+	ErrNoPoolsAvailable = NewNoPoolsAvailableError("no pools available") // returned when no pools are available
+)
+
+type baseError struct {
+ msg string
+}
+
+func (b *baseError) Error() string {
+ return b.msg
+}
+
+// NewProviderError returns a new ProviderError
+func NewProviderError(msg string, a ...interface{}) error {
+	return &ProviderError{
+		baseError{
+			msg: fmt.Sprintf(msg, a...),
+		},
+	}
+}
+
+// ProviderError is returned when a provider operation fails.
+type ProviderError struct {
+	baseError
+}
+
+func (p *ProviderError) Is(target error) bool {
+	if target == nil {
+		return false
+	}
+
+	_, ok := target.(*ProviderError)
+	return ok
+}
+
+// NewMissingSecretError returns a new MissingSecretError
+func NewMissingSecretError(msg string, a ...interface{}) error {
+ return &MissingSecretError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// MissingSecretError is returned the secret to validate a webhook is missing
+type MissingSecretError struct {
+ baseError
+}
+
+func (p *MissingSecretError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*MissingSecretError)
+ return ok
+}
+
+// NewUnauthorizedError returns a new UnauthorizedError
+func NewUnauthorizedError(msg string) error {
+ return &UnauthorizedError{
+ baseError{
+ msg: msg,
+ },
+ }
+}
+
+// UnauthorizedError is returned when a request is unauthorized
+type UnauthorizedError struct {
+ baseError
+}
+
+func (p *UnauthorizedError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*UnauthorizedError)
+ return ok
+}
+
+// NewNotFoundError returns a new NotFoundError
+func NewNotFoundError(msg string, a ...interface{}) error {
+ return &NotFoundError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// NotFoundError is returned when a resource is not found
+type NotFoundError struct {
+ baseError
+}
+
+func (p *NotFoundError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*NotFoundError)
+ return ok
+}
+
+// NewDuplicateUserError returns a new DuplicateUserError
+func NewDuplicateUserError(msg string) error {
+ return &DuplicateUserError{
+ baseError{
+ msg: msg,
+ },
+ }
+}
+
+// DuplicateUserError is returned when a duplicate user is requested
+type DuplicateUserError struct {
+ baseError
+}
+
+func (p *DuplicateUserError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*DuplicateUserError)
+ return ok
+}
+
+// NewBadRequestError returns a new BadRequestError
+func NewBadRequestError(msg string, a ...interface{}) error {
+ return &BadRequestError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// BadRequestError is returned when a malformed request is received
+type BadRequestError struct {
+ baseError
+}
+
+func (p *BadRequestError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*BadRequestError)
+ return ok
+}
+
+// NewConflictError returns a new ConflictError
+func NewConflictError(msg string, a ...interface{}) error {
+ return &ConflictError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// ConflictError is returned when a conflicting request is made
+type ConflictError struct {
+ baseError
+}
+
+func (p *ConflictError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*ConflictError)
+ return ok
+}
+
+// NewTimeoutError returns a new TimoutError
+func NewTimeoutError(msg string, a ...interface{}) error {
+ return &TimoutError{
+ baseError{
+ msg: fmt.Sprintf(msg, a...),
+ },
+ }
+}
+
+// TimoutError is returned when an operation times out.
+type TimoutError struct {
+ baseError
+}
+
+func (p *TimoutError) Is(target error) bool {
+ if target == nil {
+ return false
+ }
+
+ _, ok := target.(*TimoutError)
+ return ok
+}
+
+// NewUnprocessableError returns a new UnprocessableError
+func NewUnprocessableError(msg string, a ...interface{}) error {
+	// BUG FIX: previously returned &TimoutError{}, which made
+	// errors.Is(err, ErrUnprocessable) match *TimoutError instead.
+	return &UnprocessableError{
+		baseError{
+			msg: fmt.Sprintf(msg, a...),
+		},
+	}
+}
+
+// UnprocessableError is returned when a request cannot be processed.
+type UnprocessableError struct {
+	baseError
+}
+
+func (p *UnprocessableError) Is(target error) bool {
+	if target == nil {
+		return false
+	}
+	_, ok := target.(*UnprocessableError)
+	return ok
+}
+
+// NewNoPoolsAvailableError returns a new NoPoolsAvailableError
+func NewNoPoolsAvailableError(msg string, a ...interface{}) error {
+	// BUG FIX: previously returned &TimoutError{}, which made
+	// errors.Is(err, ErrNoPoolsAvailable) match *TimoutError instead.
+	return &NoPoolsAvailableError{
+		baseError{
+			msg: fmt.Sprintf(msg, a...),
+		},
+	}
+}
+
+// NoPoolsAvailableError is returned when there are no pools available.
+type NoPoolsAvailableError struct {
+	baseError
+}
+
+func (p *NoPoolsAvailableError) Is(target error) bool {
+	if target == nil {
+		return false
+	}
+	_, ok := target.(*NoPoolsAvailableError)
+	return ok
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go
new file mode 100644
index 00000000..c0e79805
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/commands.go
@@ -0,0 +1,99 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ gErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/params"
+ "github.com/mattn/go-isatty"
+)
+
+type ExecutionCommand string
+
+const (
+ CreateInstanceCommand ExecutionCommand = "CreateInstance"
+ DeleteInstanceCommand ExecutionCommand = "DeleteInstance"
+ GetInstanceCommand ExecutionCommand = "GetInstance"
+ ListInstancesCommand ExecutionCommand = "ListInstances"
+ StartInstanceCommand ExecutionCommand = "StartInstance"
+ StopInstanceCommand ExecutionCommand = "StopInstance"
+ RemoveAllInstancesCommand ExecutionCommand = "RemoveAllInstances"
+ GetVersionCommand ExecutionCommand = "GetVersion"
+)
+
+// V0.1.1 commands
+const (
+ GetSupportedInterfaceVersionsCommand ExecutionCommand = "GetSupportedInterfaceVersions"
+ ValidatePoolInfoCommand ExecutionCommand = "ValidatePoolInfo"
+ GetConfigJSONSchemaCommand ExecutionCommand = "GetConfigJSONSchema"
+ GetExtraSpecsJSONSchemaCommand ExecutionCommand = "GetExtraSpecsJSONSchema"
+)
+
+const (
+ // ExitCodeNotFound is an exit code that indicates a Not Found error
+ ExitCodeNotFound int = 30
+ // ExitCodeDuplicate is an exit code that indicates a duplicate error
+ ExitCodeDuplicate int = 31
+)
+
+func GetBoostrapParamsFromStdin(c ExecutionCommand) (params.BootstrapInstance, error) {
+	var bootstrapParams params.BootstrapInstance
+	if c == CreateInstanceCommand {
+		if isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd()) {
+			return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
+		}
+
+		var data bytes.Buffer
+		if _, err := io.Copy(&data, os.Stdin); err != nil {
+			// Wrap the copy error so the root cause is not silently dropped.
+			return params.BootstrapInstance{}, fmt.Errorf("failed to copy bootstrap params: %w", err)
+		}
+
+		if data.Len() == 0 {
+			return params.BootstrapInstance{}, fmt.Errorf("%s requires data passed into stdin", CreateInstanceCommand)
+		}
+
+		if err := json.Unmarshal(data.Bytes(), &bootstrapParams); err != nil {
+			return params.BootstrapInstance{}, fmt.Errorf("failed to decode instance params: %w", err)
+		}
+		if bootstrapParams.ExtraSpecs == nil {
+			// Initialize ExtraSpecs as an empty JSON object
+			bootstrapParams.ExtraSpecs = json.RawMessage([]byte("{}"))
+		}
+
+		return bootstrapParams, nil
+	}
+
+	// If the command is not CreateInstance, we don't need to read from stdin
+	return params.BootstrapInstance{}, nil
+}
+
+func ResolveErrorToExitCode(err error) int {
+ if err != nil {
+ if errors.Is(err, gErrors.ErrNotFound) {
+ return ExitCodeNotFound
+ } else if errors.Is(err, gErrors.ErrDuplicateEntity) {
+ return ExitCodeDuplicate
+ }
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go
new file mode 100644
index 00000000..d00afe92
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/interface.go
@@ -0,0 +1,43 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+import (
+ "context"
+
+ "github.com/cloudbase/garm-provider-common/params"
+)
+
+// ExternalProvider defines a common interface that external providers need to implement.
+// This is very similar to the common.Provider interface, and was redefined here to
+// decouple it, in case it may diverge from native providers.
+type ExternalProvider interface {
+ // CreateInstance creates a new compute instance in the provider.
+ CreateInstance(ctx context.Context, bootstrapParams params.BootstrapInstance) (params.ProviderInstance, error)
+ // Delete instance will delete the instance in a provider.
+ DeleteInstance(ctx context.Context, instance string) error
+ // GetInstance will return details about one instance.
+ GetInstance(ctx context.Context, instance string) (params.ProviderInstance, error)
+ // ListInstances will list all instances for a provider.
+ ListInstances(ctx context.Context, poolID string) ([]params.ProviderInstance, error)
+ // RemoveAllInstances will remove all instances created by this provider.
+ RemoveAllInstances(ctx context.Context) error
+ // Stop shuts down the instance.
+ Stop(ctx context.Context, instance string, force bool) error
+ // Start boots up an instance.
+ Start(ctx context.Context, instance string) error
+ // GetVersion returns the version of the provider.
+ GetVersion(ctx context.Context) string
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go b/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go
new file mode 100644
index 00000000..ebdbbb8c
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/execution/common/versions.go
@@ -0,0 +1,22 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package common
+
+const (
+ // Version v0.1.0
+ Version010 = "v0.1.0"
+ // Version v0.1.1
+ Version011 = "v0.1.1"
+)
diff --git a/vendor/github.com/cloudbase/garm-provider-common/params/github.go b/vendor/github.com/cloudbase/garm-provider-common/params/github.go
new file mode 100644
index 00000000..c3e9a0a4
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/params/github.go
@@ -0,0 +1,75 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package params
+
+// RunnerApplicationDownload represents a binary for the self-hosted runner application that can be downloaded.
+// This is copied from the go-github package. It does not make sense to create a dependency on go-github just
+// for this struct.
+type RunnerApplicationDownload struct {
+ OS *string `json:"os,omitempty"`
+ Architecture *string `json:"architecture,omitempty"`
+ DownloadURL *string `json:"download_url,omitempty"`
+ Filename *string `json:"filename,omitempty"`
+ TempDownloadToken *string `json:"temp_download_token,omitempty"`
+ SHA256Checksum *string `json:"sha256_checksum,omitempty"`
+}
+
+// GetArchitecture returns the Architecture field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetArchitecture() string {
+ if r == nil || r.Architecture == nil {
+ return ""
+ }
+ return *r.Architecture
+}
+
+// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetDownloadURL() string {
+ if r == nil || r.DownloadURL == nil {
+ return ""
+ }
+ return *r.DownloadURL
+}
+
+// GetFilename returns the Filename field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetFilename() string {
+ if r == nil || r.Filename == nil {
+ return ""
+ }
+ return *r.Filename
+}
+
+// GetOS returns the OS field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetOS() string {
+ if r == nil || r.OS == nil {
+ return ""
+ }
+ return *r.OS
+}
+
+// GetSHA256Checksum returns the SHA256Checksum field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetSHA256Checksum() string {
+ if r == nil || r.SHA256Checksum == nil {
+ return ""
+ }
+ return *r.SHA256Checksum
+}
+
+// GetTempDownloadToken returns the TempDownloadToken field if it's non-nil, zero value otherwise.
+func (r *RunnerApplicationDownload) GetTempDownloadToken() string {
+ if r == nil || r.TempDownloadToken == nil {
+ return ""
+ }
+ return *r.TempDownloadToken
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/params/params.go b/vendor/github.com/cloudbase/garm-provider-common/params/params.go
new file mode 100644
index 00000000..0a63f709
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/params/params.go
@@ -0,0 +1,169 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package params
+
+import (
+ "encoding/json"
+)
+
+type (
+ AddressType string
+ InstanceStatus string
+ OSType string
+ OSArch string
+)
+
+const (
+ Windows OSType = "windows"
+ Linux OSType = "linux"
+ Unknown OSType = "unknown"
+)
+
+const (
+ Amd64 OSArch = "amd64"
+ I386 OSArch = "i386"
+ Arm64 OSArch = "arm64"
+ Arm OSArch = "arm"
+)
+
+const (
+ InstanceRunning InstanceStatus = "running"
+ InstanceStopped InstanceStatus = "stopped"
+ InstanceError InstanceStatus = "error"
+ InstancePendingDelete InstanceStatus = "pending_delete"
+ InstancePendingForceDelete InstanceStatus = "pending_force_delete"
+ InstanceDeleting InstanceStatus = "deleting"
+ InstanceDeleted InstanceStatus = "deleted"
+ InstancePendingCreate InstanceStatus = "pending_create"
+ InstanceCreating InstanceStatus = "creating"
+ InstanceStatusUnknown InstanceStatus = "unknown"
+)
+
+const (
+ PublicAddress AddressType = "public"
+ PrivateAddress AddressType = "private"
+)
+
+type UserDataOptions struct {
+ DisableUpdatesOnBoot bool `json:"disable_updates_on_boot"`
+ ExtraPackages []string `json:"extra_packages"`
+ EnableBootDebug bool `json:"enable_boot_debug"`
+}
+
+type BootstrapInstance struct {
+ Name string `json:"name"`
+ Tools []RunnerApplicationDownload `json:"tools"`
+ // RepoURL is the URL the github runner agent needs to configure itself.
+ RepoURL string `json:"repo_url"`
+ // CallbackUrl is the URL where the instance can send a post, signaling
+ // progress or status.
+ CallbackURL string `json:"callback-url"`
+ // MetadataURL is the URL where instances can fetch information needed to set themselves up.
+ MetadataURL string `json:"metadata-url"`
+ // InstanceToken is the token that needs to be set by the instance in the headers
+	// in order to send updates back to garm via the CallbackURL.
+ InstanceToken string `json:"instance-token"`
+ // SSHKeys are the ssh public keys we may want to inject inside the runners, if the
+ // provider supports it.
+ SSHKeys []string `json:"ssh-keys"`
+ // ExtraSpecs is an opaque raw json that gets sent to the provider
+ // as part of the bootstrap params for instances. It can contain
+ // any kind of data needed by providers. The contents of this field means
+ // nothing to garm itself. We don't act on the information in this field at
+ // all. We only validate that it's a proper json.
+ ExtraSpecs json.RawMessage `json:"extra_specs,omitempty"`
+
+ // GitHubRunnerGroup is the github runner group in which the newly installed runner
+ // should be added to. The runner group must be created by someone with access to the
+ // enterprise.
+ GitHubRunnerGroup string `json:"github-runner-group"`
+
+ // CACertBundle is a CA certificate bundle which will be sent to instances and which
+	// will typically be installed as a system wide trusted root CA by either cloud-init
+ // or whatever mechanism the provider will use to set up the runner.
+ CACertBundle []byte `json:"ca-cert-bundle"`
+
+ // OSArch is the target OS CPU architecture of the runner.
+ OSArch OSArch `json:"arch"`
+
+ // OSType is the target OS platform of the runner (windows, linux).
+ OSType OSType `json:"os_type"`
+
+ // Flavor is the platform specific abstraction that defines what resources will be allocated
+ // to the runner (CPU, RAM, disk space, etc). This field is meaningful to the provider which
+ // handles the actual creation.
+ Flavor string `json:"flavor"`
+
+ // Image is the platform specific identifier of the operating system template that will be used
+ // to spin up a new machine.
+ Image string `json:"image"`
+
+ // Labels are a list of github runner labels that will be added to the runner.
+ Labels []string `json:"labels"`
+
+ // PoolID is the ID of the garm pool to which this runner belongs.
+ PoolID string `json:"pool_id"`
+
+ // UserDataOptions are the options for the user data generation.
+ UserDataOptions UserDataOptions `json:"user_data_options"`
+
+ // JitConfigEnabled is a flag that indicates if the runner should be configured to use
+ // just-in-time configuration. If set to true, providers must attempt to fetch the JIT configuration
+ // from the metadata service instead of the runner registration token. The runner registration token
+ // is not available if the runner is configured to use JIT.
+ JitConfigEnabled bool `json:"jit_config_enabled"`
+}
+
+type Address struct {
+ Address string `json:"address"`
+ Type AddressType `json:"type"`
+}
+
+type ProviderInstance struct {
+	// ProviderID is the unique ID the provider associated
+ // with the compute instance. We use this to identify the
+ // instance in the provider.
+ ProviderID string `json:"provider_id,omitempty"`
+
+ // Name is the name associated with an instance. Depending on
+ // the provider, this may or may not be useful in the context of
+ // the provider, but we can use it internally to identify the
+ // instance.
+ Name string `json:"name,omitempty"`
+
+ // OSType is the operating system type. For now, only Linux and
+ // Windows are supported.
+ OSType OSType `json:"os_type,omitempty"`
+
+ // OSName is the name of the OS. Eg: ubuntu, centos, etc.
+ OSName string `json:"os_name,omitempty"`
+
+ // OSVersion is the version of the operating system.
+ OSVersion string `json:"os_version,omitempty"`
+
+ // OSArch is the operating system architecture.
+ OSArch OSArch `json:"os_arch,omitempty"`
+
+ // Addresses is a list of IP addresses the provider reports
+ // for this instance.
+ Addresses []Address `json:"addresses,omitempty"`
+
+ // Status is the status of the instance inside the provider (eg: running, stopped, etc)
+ Status InstanceStatus `json:"status,omitempty"`
+
+ // ProviderFault holds any error messages captured from the IaaS provider that is
+ // responsible for managing the lifecycle of the runner.
+ ProviderFault []byte `json:"provider_fault,omitempty"`
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go
new file mode 100644
index 00000000..cc417f6d
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec.go
@@ -0,0 +1,39 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package exec
+
+import (
+ "bytes"
+ "context"
+ "os/exec"
+
+ "github.com/pkg/errors"
+)
+
+func Exec(ctx context.Context, providerBin string, stdinData []byte, environ []string) ([]byte, error) {
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+ c := exec.CommandContext(ctx, providerBin)
+ c.Env = environ
+ c.Stdin = bytes.NewBuffer(stdinData)
+ c.Stdout = stdout
+ c.Stderr = stderr
+
+ if err := c.Run(); err != nil {
+ return nil, errors.Wrapf(err, "provider binary failed with stdout: %s; stderr: %s", stdout.String(), stderr.String())
+ }
+
+ return stdout.Bytes(), nil
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go
new file mode 100644
index 00000000..4aaea613
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_nix.go
@@ -0,0 +1,26 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package exec
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func IsExecutable(path string) bool {
+ return unix.Access(path, unix.X_OK) == nil
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go
new file mode 100644
index 00000000..dfcc6225
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/exec/exec_windows.go
@@ -0,0 +1,32 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package exec
+
+import (
+ "os"
+ "strings"
+)
+
+func IsExecutable(path string) bool {
+ pathExt := os.Getenv("PATHEXT")
+ execList := strings.Split(pathExt, ";")
+ for _, ext := range execList {
+ if strings.HasSuffix(path, ext) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/seal.go b/vendor/github.com/cloudbase/garm-provider-common/util/seal.go
new file mode 100644
index 00000000..0033ce47
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/seal.go
@@ -0,0 +1,170 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/minio/sio"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/hkdf"
+)
+
+type Envelope struct {
+ Nonce [32]byte `json:"nonce"`
+ Data []byte `json:"data"`
+}
+
+// Seal will encrypt the given data using a derived key from the given passphrase.
+// This function is meant to be used with small datasets like passwords, keys and
+// secrets of any type, before they are saved to disk.
+func Seal(data []byte, passphrase []byte) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ var nonce [32]byte
+ if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
+ return nil, fmt.Errorf("failed to read random data: %w", err)
+ }
+
+ // derive an encryption key from the master key and the nonce
+ var key [32]byte
+ kdf := hkdf.New(sha256.New, passphrase, nonce[:], nil)
+ if _, err := io.ReadFull(kdf, key[:]); err != nil {
+ return nil, fmt.Errorf("failed to derive encryption key: %w", err)
+ }
+
+ input := bytes.NewReader(data)
+ output := bytes.NewBuffer(nil)
+
+ if _, err := sio.Encrypt(output, input, sio.Config{Key: key[:]}); err != nil {
+ return nil, fmt.Errorf("failed to encrypt data: %w", err)
+ }
+ envelope := Envelope{
+ Data: output.Bytes(),
+ Nonce: nonce,
+ }
+ asJs, err := json.Marshal(envelope)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal envelope: %w", err)
+ }
+ return asJs, nil
+}
+
+// Unseal will decrypt the given data using a derived key from the given passphrase.
+// This function is meant to be used with small datasets like passwords, keys and
+// secrets of any type, after they are read from disk.
+func Unseal(data []byte, passphrase []byte) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ var envelope Envelope
+ if err := json.Unmarshal(data, &envelope); err != nil {
+ return Aes256Decode(data, string(passphrase))
+ }
+
+ // derive an encryption key from the master key and the nonce
+ var key [32]byte
+ kdf := hkdf.New(sha256.New, passphrase, envelope.Nonce[:], nil)
+ if _, err := io.ReadFull(kdf, key[:]); err != nil {
+ return nil, fmt.Errorf("failed to derive encryption key: %w", err)
+ }
+
+ input := bytes.NewReader(envelope.Data)
+ output := bytes.NewBuffer(nil)
+
+ if _, err := sio.Decrypt(output, input, sio.Config{Key: key[:]}); err != nil {
+ return nil, fmt.Errorf("failed to decrypt data: %w", err)
+ }
+
+ return output.Bytes(), nil
+}
+
+func Aes256Encode(target []byte, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ block, err := aes.NewCipher([]byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "creating cipher")
+ }
+
+ aesgcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, errors.Wrap(err, "creating new aead")
+ }
+
+ nonce := make([]byte, aesgcm.NonceSize())
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, errors.Wrap(err, "creating nonce")
+ }
+
+ ciphertext := aesgcm.Seal(nonce, nonce, target, nil)
+ return ciphertext, nil
+}
+
+func Aes256EncodeString(target string, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ return Aes256Encode([]byte(target), passphrase)
+}
+
+func Aes256Decode(target []byte, passphrase string) ([]byte, error) {
+ if len(passphrase) != 32 {
+ return nil, fmt.Errorf("invalid passphrase length (expected length 32 characters)")
+ }
+
+ block, err := aes.NewCipher([]byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "creating cipher")
+ }
+
+ aesgcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, errors.Wrap(err, "creating new aead")
+ }
+
+ nonceSize := aesgcm.NonceSize()
+ if len(target) < nonceSize {
+ return nil, fmt.Errorf("failed to decrypt text")
+ }
+
+ nonce, ciphertext := target[:nonceSize], target[nonceSize:]
+ plaintext, err := aesgcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decrypt text")
+ }
+ return plaintext, nil
+}
+
+func Aes256DecodeString(target []byte, passphrase string) (string, error) {
+ data, err := Aes256Decode(target, passphrase)
+ if err != nil {
+ return "", err
+ }
+ return string(data), nil
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/util.go b/vendor/github.com/cloudbase/garm-provider-common/util/util.go
new file mode 100644
index 00000000..36ce09ba
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/util.go
@@ -0,0 +1,327 @@
+// Copyright 2023 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package util
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+
+ runnerErrors "github.com/cloudbase/garm-provider-common/errors"
+ "github.com/cloudbase/garm-provider-common/params"
+
+ "github.com/google/uuid"
+ gorillaHandlers "github.com/gorilla/handlers"
+ "github.com/pkg/errors"
+ "github.com/teris-io/shortid"
+ "golang.org/x/crypto/bcrypt"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+)
+
+const alphanumeric = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+// From: https://www.alexedwards.net/blog/validation-snippets-for-go#email-validation
+var rxEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
+
+var (
+ OSToOSTypeMap map[string]params.OSType = map[string]params.OSType{
+ "almalinux": params.Linux,
+ "alma": params.Linux,
+ "alpine": params.Linux,
+ "archlinux": params.Linux,
+ "arch": params.Linux,
+ "centos": params.Linux,
+ "ubuntu": params.Linux,
+ "rhel": params.Linux,
+ "suse": params.Linux,
+ "opensuse": params.Linux,
+ "fedora": params.Linux,
+ "debian": params.Linux,
+ "flatcar": params.Linux,
+ "gentoo": params.Linux,
+ "rockylinux": params.Linux,
+ "rocky": params.Linux,
+ "windows": params.Windows,
+ }
+
+ githubArchMapping map[string]string = map[string]string{
+ "x86_64": "x64",
+ "amd64": "x64",
+ "armv7l": "arm",
+ "aarch64": "arm64",
+ "x64": "x64",
+ "arm": "arm",
+ "arm64": "arm64",
+ }
+
+ githubOSTypeMap map[string]string = map[string]string{
+ "linux": "linux",
+ "windows": "win",
+ }
+
+ //
+ githubOSTag = map[params.OSType]string{
+ params.Linux: "Linux",
+ params.Windows: "Windows",
+ }
+)
+
+// ResolveToGithubArch returns the cpu architecture as it is defined in the GitHub
+// tools download list. We use it to find the proper tools for the OS/Arch combo we're
+// deploying.
+func ResolveToGithubArch(arch string) (string, error) {
+ ghArch, ok := githubArchMapping[arch]
+ if !ok {
+ return "", runnerErrors.NewNotFoundError("arch %s is unknown", arch)
+ }
+
+ return ghArch, nil
+}
+
+// ResolveToGithubOSType returns the OS type as it is defined in the GitHub
+// tools download list. We use it to find the proper tools for the OS/Arch combo we're
+// deploying.
+func ResolveToGithubOSType(osType string) (string, error) {
+ ghOS, ok := githubOSTypeMap[osType]
+ if !ok {
+ return "", runnerErrors.NewNotFoundError("os %s is unknown", osType)
+ }
+
+ return ghOS, nil
+}
+
+// ResolveToGithubTag returns the default OS tag that self hosted runners automatically
+// (and forcefully) adds to every runner that gets deployed. We need to keep track of those
+// tags internally as well.
+func ResolveToGithubTag(os params.OSType) (string, error) {
+ ghOS, ok := githubOSTag[os]
+ if !ok {
+ return "", runnerErrors.NewNotFoundError("os %s is unknown", os)
+ }
+
+ return ghOS, nil
+}
+
+// IsValidEmail returns a bool indicating if an email is valid
+func IsValidEmail(email string) bool {
+ if len(email) > 254 || !rxEmail.MatchString(email) {
+ return false
+ }
+ return true
+}
+
+func IsAlphanumeric(s string) bool {
+ for _, r := range s {
+ if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// GetLoggingWriter returns a new io.Writer suitable for logging.
+func GetLoggingWriter(logFile string) (io.Writer, error) {
+ var writer io.Writer = os.Stdout
+ if logFile != "" {
+ dirname := path.Dir(logFile)
+ if _, err := os.Stat(dirname); err != nil {
+ if !os.IsNotExist(err) {
+ return nil, fmt.Errorf("failed to create log folder")
+ }
+ if err := os.MkdirAll(dirname, 0o711); err != nil {
+ return nil, fmt.Errorf("failed to create log folder")
+ }
+ }
+ writer = &lumberjack.Logger{
+ Filename: logFile,
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+ Compress: true, // disabled by default
+ }
+ }
+ return writer, nil
+}
+
+func ConvertFileToBase64(file string) (string, error) {
+ bytes, err := os.ReadFile(file)
+ if err != nil {
+ return "", errors.Wrap(err, "reading file")
+ }
+
+ return base64.StdEncoding.EncodeToString(bytes), nil
+}
+
+func OSToOSType(os string) (params.OSType, error) {
+ osType, ok := OSToOSTypeMap[strings.ToLower(os)]
+ if !ok {
+ return params.Unknown, fmt.Errorf("no OS to OS type mapping for %s", os)
+ }
+ return osType, nil
+}
+
+func GetTools(osType params.OSType, osArch params.OSArch, tools []params.RunnerApplicationDownload) (params.RunnerApplicationDownload, error) {
+ // Validate image OS. Linux only for now.
+ switch osType {
+ case params.Linux:
+ case params.Windows:
+ default:
+ return params.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS type: %s", osType)
+ }
+
+ switch osArch {
+ case params.Amd64:
+ case params.Arm:
+ case params.Arm64:
+ default:
+ return params.RunnerApplicationDownload{}, fmt.Errorf("unsupported OS arch: %s", osArch)
+ }
+
+ // Find tools for OS/Arch.
+ for _, tool := range tools {
+ if tool.GetOS() == "" || tool.GetArchitecture() == "" {
+ continue
+ }
+
+ ghArch, err := ResolveToGithubArch(string(osArch))
+ if err != nil {
+ continue
+ }
+
+ ghOS, err := ResolveToGithubOSType(string(osType))
+ if err != nil {
+ continue
+ }
+ if tool.GetArchitecture() == ghArch && tool.GetOS() == ghOS {
+ return tool, nil
+ }
+ }
+ return params.RunnerApplicationDownload{}, fmt.Errorf("failed to find tools for OS %s and arch %s", osType, osArch)
+}
+
+// GetRandomString returns a secure random string
+func GetRandomString(n int) (string, error) {
+ data := make([]byte, n)
+ _, err := rand.Read(data)
+ if err != nil {
+ return "", errors.Wrap(err, "getting random data")
+ }
+ for i, b := range data {
+ data[i] = alphanumeric[b%byte(len(alphanumeric))]
+ }
+
+ return string(data), nil
+}
+
+// PaswsordToBcrypt returns a bcrypt hash of the specified password using the default cost
+func PaswsordToBcrypt(password string) (string, error) {
+ hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+ if err != nil {
+ return "", fmt.Errorf("failed to hash password")
+ }
+ return string(hashedPassword), nil
+}
+
+func NewLoggingMiddleware(writer io.Writer) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return gorillaHandlers.CombinedLoggingHandler(writer, next)
+ }
+}
+
+func SanitizeLogEntry(entry string) string {
+ return strings.Replace(strings.Replace(entry, "\n", "", -1), "\r", "", -1)
+}
+
+func toBase62(uuid []byte) string {
+ var i big.Int
+ i.SetBytes(uuid[:])
+ return i.Text(62)
+}
+
+func NewID() string {
+ short, err := shortid.Generate()
+ if err == nil {
+ return toBase62([]byte(short))
+ }
+ newUUID := uuid.New()
+ return toBase62(newUUID[:])
+}
+
+func UTF16FromString(s string) ([]uint16, error) {
+ buf := make([]uint16, 0, len(s)*2+1)
+ for _, r := range s {
+ buf = utf16.AppendRune(buf, r)
+ }
+ return utf16.AppendRune(buf, '\x00'), nil
+}
+
+func UTF16ToString(s []uint16) string {
+ for i, v := range s {
+ if v == 0 {
+ s = s[0:i]
+ break
+ }
+ }
+ return string(utf16.Decode(s))
+}
+
+func Uint16ToByteArray(u []uint16) []byte {
+ ret := make([]byte, (len(u)-1)*2)
+ for i := 0; i < len(u)-1; i++ {
+ binary.LittleEndian.PutUint16(ret[i*2:], uint16(u[i]))
+ }
+ return ret
+}
+
+func UTF16EncodedByteArrayFromString(s string) ([]byte, error) {
+ asUint16, err := UTF16FromString(s)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode to uint16: %w", err)
+ }
+ asBytes := Uint16ToByteArray(asUint16)
+ return asBytes, nil
+}
+
+func CompressData(data []byte) ([]byte, error) {
+ var b bytes.Buffer
+ gz := gzip.NewWriter(&b)
+
+ _, err := gz.Write(data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compress data: %w", err)
+ }
+
+ if err = gz.Flush(); err != nil {
+ return nil, fmt.Errorf("failed to flush buffer: %w", err)
+ }
+
+ if err = gz.Close(); err != nil {
+ return nil, fmt.Errorf("failed to close buffer: %w", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go
new file mode 100644
index 00000000..92ed2edf
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/reader.go
@@ -0,0 +1,184 @@
+package websocket
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/gorilla/websocket"
+)
+
+const (
+ // Time allowed to write a message to the peer.
+ writeWait = 10 * time.Second
+
+ // Time allowed to read the next pong message from the peer.
+ pongWait = 60 * time.Second
+
+ // Send pings to peer with this period. Must be less than pongWait.
+ pingPeriod = (pongWait * 9) / 10
+
+ // Maximum message size allowed from peer.
+ maxMessageSize = 16384 // 16 KB
+)
+
+// MessageHandler is a function that processes a message received from a websocket connection.
+type MessageHandler func(msgType int, msg []byte) error
+
+type APIErrorResponse struct {
+ Error string `json:"error"`
+ Details string `json:"details"`
+}
+
+// NewReader creates a new websocket reader. The reader will pass on any message it receives to the
+// handler function. The handler function should return an error if it fails to process the message.
+func NewReader(ctx context.Context, baseURL, pth, token string, handler MessageHandler) (*Reader, error) {
+ parsedURL, err := url.Parse(baseURL)
+ if err != nil {
+ return nil, err
+ }
+
+ wsScheme := "ws"
+ if parsedURL.Scheme == "https" {
+ wsScheme = "wss"
+ }
+ u := url.URL{Scheme: wsScheme, Host: parsedURL.Host, Path: pth}
+ header := http.Header{}
+ header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
+
+ return &Reader{
+ ctx: ctx,
+ url: u,
+ header: header,
+ handler: handler,
+ done: make(chan struct{}),
+ }, nil
+}
+
+type Reader struct {
+ ctx context.Context
+ url url.URL
+ header http.Header
+
+ done chan struct{}
+ running bool
+
+ handler MessageHandler
+
+ conn *websocket.Conn
+ mux sync.Mutex
+ writeMux sync.Mutex
+}
+
+func (w *Reader) Stop() {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if !w.running {
+ return
+ }
+ w.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+ w.conn.Close()
+ close(w.done)
+ w.running = false
+}
+
+func (w *Reader) Done() <-chan struct{} {
+ return w.done
+}
+
+func (w *Reader) WriteMessage(messageType int, data []byte) error {
+ // The websocket package does not support concurrent writes and panics if it
+ // detects that one has occurred, so we need to lock the writeMux to prevent
+ // concurrent writes to the same connection.
+ w.writeMux.Lock()
+ defer w.writeMux.Unlock()
+ if !w.running {
+ return fmt.Errorf("websocket is not running")
+ }
+ if err := w.conn.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
+ return err
+ }
+ return w.conn.WriteMessage(messageType, data)
+}
+
+func (w *Reader) Start() error {
+ w.mux.Lock()
+ defer w.mux.Unlock()
+ if w.running {
+ return nil
+ }
+
+ c, response, err := websocket.DefaultDialer.Dial(w.url.String(), w.header)
+ if err != nil {
+ var resp APIErrorResponse
+ var msg string
+ var status string
+ if response != nil {
+ if response.Body != nil {
+ if err := json.NewDecoder(response.Body).Decode(&resp); err == nil {
+ msg = resp.Details
+ }
+ }
+ status = response.Status
+ }
+ return fmt.Errorf("failed to stream logs: %q %s (%s)", err, msg, status)
+ }
+ w.conn = c
+ w.running = true
+ go w.loop()
+ go w.handlerReader()
+ return nil
+}
+
+func (w *Reader) handlerReader() {
+ defer w.Stop()
+ w.writeMux.Lock()
+ w.conn.SetReadLimit(maxMessageSize)
+ w.conn.SetReadDeadline(time.Now().Add(pongWait))
+ w.conn.SetPongHandler(func(string) error { w.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+ w.writeMux.Unlock()
+ for {
+ msgType, message, err := w.conn.ReadMessage()
+ if err != nil {
+ if IsErrorOfInterest(err) {
+ // TODO(gabriel-samfira): we should allow for an error channel that can be used to signal
+ // the caller that the connection has been closed.
+ slog.With(slog.Any("error", err)).Error("reading log message")
+ }
+ return
+ }
+ if w.handler != nil {
+ if err := w.handler(msgType, message); err != nil {
+ slog.With(slog.Any("error", err)).Error("handling log message")
+ }
+ }
+ }
+}
+
+func (w *Reader) loop() {
+ defer w.Stop()
+ ticker := time.NewTicker(pingPeriod)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case <-w.Done():
+ return
+ case <-ticker.C:
+ w.writeMux.Lock()
+ w.conn.SetWriteDeadline(time.Now().Add(writeWait))
+ err := w.conn.WriteMessage(websocket.PingMessage, nil)
+ if err != nil {
+ w.writeMux.Unlock()
+ return
+ }
+ w.writeMux.Unlock()
+ }
+ }
+}
diff --git a/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go
new file mode 100644
index 00000000..88c02fa5
--- /dev/null
+++ b/vendor/github.com/cloudbase/garm-provider-common/util/websocket/util.go
@@ -0,0 +1,37 @@
+package websocket
+
+import (
+ "errors"
+ "net"
+
+ "github.com/gorilla/websocket"
+)
+
+func IsErrorOfInterest(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if errors.Is(err, websocket.ErrCloseSent) {
+ return false
+ }
+
+ if errors.Is(err, websocket.ErrBadHandshake) {
+ return false
+ }
+
+ if errors.Is(err, net.ErrClosed) {
+ return false
+ }
+
+ asCloseErr, ok := err.(*websocket.CloseError)
+ if ok {
+ switch asCloseErr.Code {
+ case websocket.CloseNormalClosure, websocket.CloseGoingAway,
+ websocket.CloseNoStatusReceived, websocket.CloseAbnormalClosure:
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml
deleted file mode 100644
index bfc42120..00000000
--- a/vendor/github.com/felixge/httpsnoop/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - 1.8
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
index 2d84889a..4e12afdd 100644
--- a/vendor/github.com/felixge/httpsnoop/Makefile
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
- go test -v ./...
+ go test -race -v ./...
generate:
go generate .
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
index ddcecd13..cf6b42f3 100644
--- a/vendor/github.com/felixge/httpsnoop/README.md
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -7,8 +7,8 @@ http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
-[](https://godoc.org/github.com/felixge/httpsnoop)
-[](https://travis-ci.org/felixge/httpsnoop)
+[](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
## Usage Example
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
index b77cc7c0..bec7b71b 100644
--- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -52,7 +52,7 @@ func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWri
return func(code int) {
next(code)
- if !headerWritten {
+ if !(code >= 100 && code <= 199) && !headerWritten {
m.Code = code
headerWritten = true
}
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
index 31cbdfb8..101cedde 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -1,5 +1,5 @@
// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
index ab99c07c..e0951df1 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -1,5 +1,5 @@
// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop
diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes
deleted file mode 100644
index fcadb2cf..00000000
--- a/vendor/github.com/flosch/pongo2/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/flosch/pongo2/.gitignore
deleted file mode 100644
index 1346be55..00000000
--- a/vendor/github.com/flosch/pongo2/.gitignore
+++ /dev/null
@@ -1,41 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-.idea
-.vscode
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-.project
-EBNF.txt
-test1.tpl
-pongo2_internal_test.go
-tpl-error.out
-/count.out
-/cover.out
-*.swp
-*.iml
-/cpu.out
-/mem.out
-/pongo2.test
-*.error
-/profile
-/coverage.out
-/pongo2_internal_test.ignore
diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml
deleted file mode 100644
index e39e5d05..00000000
--- a/vendor/github.com/flosch/pongo2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-os:
- - linux
- - osx
-go:
- - 1.12
-script:
- - go test -v
diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS
deleted file mode 100644
index 601697cf..00000000
--- a/vendor/github.com/flosch/pongo2/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-Main author and maintainer of pongo2:
-
-* Florian Schlachter
-
-Contributors (in no specific order):
-
-* @romanoaugusto88
-* @vitalbh
-* @blaubaer
-
-Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE
deleted file mode 100644
index e876f869..00000000
--- a/vendor/github.com/flosch/pongo2/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2014 Florian Schlachter
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md
deleted file mode 100644
index e59694e2..00000000
--- a/vendor/github.com/flosch/pongo2/README.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
-
-[](https://pkg.go.dev/flosch/pongo2)
-[](https://travis-ci.org/flosch/pongo2)
-
-pongo2 is a Django-syntax like templating-language.
-
-Install/update using `go get` (no dependencies required by pongo2):
-
-```sh
-go get -u github.com/flosch/pongo2
-```
-
-Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
-
-## First impression of a template
-
-```django
-
-
- Our admins and users
-
- {# This is a short example to give you a quick overview of pongo2's syntax. #}
- {% macro user_details(user, is_admin=false) %}
-
-
-
- = 40) || (user.karma > calc_avg_karma(userlist)+5) %} class="karma-good"{%
- endif %}>
-
-
- {{ user }}
-
-
-
-
This user registered {{ user.register_date|naturaltime }}.
-
-
-
The user's biography:
-
- {{ user.biography|markdown|truncatewords_html:15 }}
- read more
-
-
- {% if is_admin %}
-
This user is an admin!
- {% endif %}
-
- {% endmacro %}
-
-
-
-
- Our admins
- {% for admin in adminlist %} {{ user_details(admin, true) }} {% endfor %}
-
- Our members
- {% for user in userlist %} {{ user_details(user) }} {% endfor %}
-
-
-```
-
-## Features
-
-- Syntax- and feature-set-compatible with [Django 1.7](https://django.readthedocs.io/en/1.7.x/topics/templates.html)
-- [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
-- [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
-- [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
-- Additional features:
- - Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
- - [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
-
-## Caveats
-
-### Filters
-
-- **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format).
-- **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
-- **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
-
-### Tags
-
-- **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
-- **now**: takes Go's time format (see **date** and **time**-filter).
-
-### Misc
-
-- **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
- `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
-
-## Add-ons, libraries and helpers
-
-### Official
-
-- [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
-
-### 3rd-party
-
-- [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
-- [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
-- [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
-- [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [Build'n support for Iris' template engine](https://github.com/kataras/iris)
-- [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
-- [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
-- [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
-
-Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
-
-## Who's using pongo2
-
-[I'm compiling a list of pongo2 users](https://github.com/flosch/pongo2/issues/241). Add your project or company!
-
-## API-usage examples
-
-Please see the documentation for a full list of provided API methods.
-
-### A tiny example (template string)
-
-```go
-// Compile the template first (i. e. creating the AST)
-tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-if err != nil {
- panic(err)
-}
-// Now you can render the template with the given
-// pongo2.Context how often you want to.
-out, err := tpl.Execute(pongo2.Context{"name": "florian"})
-if err != nil {
- panic(err)
-}
-fmt.Println(out) // Output: Hello Florian!
-```
-
-## Example server-usage (template file)
-
-```go
-package main
-
-import (
- "github.com/flosch/pongo2"
- "net/http"
-)
-
-// Pre-compiling the templates at application startup using the
-// little Must()-helper function (Must() will panic if FromFile()
-// or FromString() will return with an error - that's it).
-// It's faster to pre-compile it anywhere at startup and only
-// execute the template later.
-var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
-
-func examplePage(w http.ResponseWriter, r *http.Request) {
- // Execute the template per HTTP request
- err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-
-func main() {
- http.HandleFunc("/", examplePage)
- http.ListenAndServe(":8080", nil)
-}
-```
diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go
deleted file mode 100644
index dbc5e3e3..00000000
--- a/vendor/github.com/flosch/pongo2/context.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "regexp"
-
- "errors"
-)
-
-var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
-
-var autoescape = true
-
-func SetAutoescape(newValue bool) {
- autoescape = newValue
-}
-
-// A Context type provides constants, variables, instances or functions to a template.
-//
-// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
-// Currently, context["pongo2"] contains the following keys:
-// 1. version: returns the version string
-//
-// Template examples for accessing items from your context:
-// {{ myconstant }}
-// {{ myfunc("test", 42) }}
-// {{ user.name }}
-// {{ pongo2.version }}
-type Context map[string]interface{}
-
-func (c Context) checkForValidIdentifiers() *Error {
- for k, v := range c {
- if !reIdentifiers.MatchString(k) {
- return &Error{
- Sender: "checkForValidIdentifiers",
- OrigError: fmt.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
- }
- }
- }
- return nil
-}
-
-// Update updates this context with the key/value-pairs from another context.
-func (c Context) Update(other Context) Context {
- for k, v := range other {
- c[k] = v
- }
- return c
-}
-
-// ExecutionContext contains all data important for the current rendering state.
-//
-// If you're writing a custom tag, your tag's Execute()-function will
-// have access to the ExecutionContext. This struct stores anything
-// about the current rendering process's Context including
-// the Context provided by the user (field Public).
-// You can safely use the Private context to provide data to the user's
-// template (like a 'forloop'-information). The Shared-context is used
-// to share data between tags. All ExecutionContexts share this context.
-//
-// Please be careful when accessing the Public data.
-// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
-//
-// To create your own execution context within tags, use the
-// NewChildExecutionContext(parent) function.
-type ExecutionContext struct {
- template *Template
-
- Autoescape bool
- Public Context
- Private Context
- Shared Context
-}
-
-var pongo2MetaContext = Context{
- "version": Version,
-}
-
-func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
- privateCtx := make(Context)
-
- // Make the pongo2-related funcs/vars available to the context
- privateCtx["pongo2"] = pongo2MetaContext
-
- return &ExecutionContext{
- template: tpl,
-
- Public: ctx,
- Private: privateCtx,
- Autoescape: autoescape,
- }
-}
-
-func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
- newctx := &ExecutionContext{
- template: parent.template,
-
- Public: parent.Public,
- Private: make(Context),
- Autoescape: parent.Autoescape,
- }
- newctx.Shared = parent.Shared
-
- // Copy all existing private items
- newctx.Private.Update(parent.Private)
-
- return newctx
-}
-
-func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
- return ctx.OrigError(errors.New(msg), token)
-}
-
-func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
- filename := ctx.template.name
- var line, col int
- if token != nil {
- // No tokens available
- // TODO: Add location (from where?)
- filename = token.Filename
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: ctx.template,
- Filename: filename,
- Line: line,
- Column: col,
- Token: token,
- Sender: "execution",
- OrigError: err,
- }
-}
-
-func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
- ctx.template.set.logf(format, args...)
-}
diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go
deleted file mode 100644
index 5a23e2b2..00000000
--- a/vendor/github.com/flosch/pongo2/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// A Django-syntax like template-engine
-//
-// Blog posts about pongo2 (including introduction and migration):
-// https://www.florian-schlachter.de/?tag=pongo2
-//
-// Complete documentation on the template language:
-// https://docs.djangoproject.com/en/dev/topics/templates/
-//
-// Try out pongo2 live in the pongo2 playground:
-// https://www.florian-schlachter.de/pongo2/
-//
-// Make sure to read README.md in the repository as well.
-//
-// A tiny example with template strings:
-//
-// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
-//
-// // Compile the template first (i. e. creating the AST)
-// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-// if err != nil {
-// panic(err)
-// }
-// // Now you can render the template with the given
-// // pongo2.Context how often you want to.
-// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
-// if err != nil {
-// panic(err)
-// }
-// fmt.Println(out) // Output: Hello Fred!
-//
-package pongo2
diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go
deleted file mode 100644
index 8aec8c10..00000000
--- a/vendor/github.com/flosch/pongo2/error.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pongo2
-
-import (
- "bufio"
- "fmt"
- "os"
-)
-
-// The Error type is being used to address an error during lexing, parsing or
-// execution. If you want to return an error object (for example in your own
-// tag or filter) fill this object with as much information as you have.
-// Make sure "Sender" is always given (if you're returning an error within
-// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
-// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
-type Error struct {
- Template *Template
- Filename string
- Line int
- Column int
- Token *Token
- Sender string
- OrigError error
-}
-
-func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
- if e.Template == nil {
- e.Template = template
- }
-
- if e.Token == nil {
- e.Token = t
- if e.Line <= 0 {
- e.Line = t.Line
- e.Column = t.Col
- }
- }
-
- return e
-}
-
-// Returns a nice formatted error string.
-func (e *Error) Error() string {
- s := "[Error"
- if e.Sender != "" {
- s += " (where: " + e.Sender + ")"
- }
- if e.Filename != "" {
- s += " in " + e.Filename
- }
- if e.Line > 0 {
- s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
- if e.Token != nil {
- s += fmt.Sprintf(" near '%s'", e.Token.Val)
- }
- }
- s += "] "
- s += e.OrigError.Error()
- return s
-}
-
-// RawLine returns the affected line from the original template, if available.
-func (e *Error) RawLine() (line string, available bool, outErr error) {
- if e.Line <= 0 || e.Filename == "" {
- return "", false, nil
- }
-
- filename := e.Filename
- if e.Template != nil {
- filename = e.Template.set.resolveFilename(e.Template, e.Filename)
- }
- file, err := os.Open(filename)
- if err != nil {
- return "", false, err
- }
- defer func() {
- err := file.Close()
- if err != nil && outErr == nil {
- outErr = err
- }
- }()
-
- scanner := bufio.NewScanner(file)
- l := 0
- for scanner.Scan() {
- l++
- if l == e.Line {
- return scanner.Text(), true, nil
- }
- }
- return "", false, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go
deleted file mode 100644
index 8d4c89e2..00000000
--- a/vendor/github.com/flosch/pongo2/filters.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-// FilterFunction is the type filter functions must fulfil
-type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
-
-var filters map[string]FilterFunction
-
-func init() {
- filters = make(map[string]FilterFunction)
-}
-
-// FilterExists returns true if the given filter is already registered
-func FilterExists(name string) bool {
- _, existing := filters[name]
- return existing
-}
-
-// RegisterFilter registers a new filter. If there's already a filter with the same
-// name, RegisterFilter will panic. You usually want to call this
-// function in the filter's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
-func RegisterFilter(name string, fn FilterFunction) error {
- if FilterExists(name) {
- return fmt.Errorf("filter with name '%s' is already registered", name)
- }
- filters[name] = fn
- return nil
-}
-
-// ReplaceFilter replaces an already registered filter with a new implementation. Use this
-// function with caution since it allows you to change existing filter behaviour.
-func ReplaceFilter(name string, fn FilterFunction) error {
- if !FilterExists(name) {
- return fmt.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- filters[name] = fn
- return nil
-}
-
-// MustApplyFilter behaves like ApplyFilter, but panics on an error.
-func MustApplyFilter(name string, value *Value, param *Value) *Value {
- val, err := ApplyFilter(name, value, param)
- if err != nil {
- panic(err)
- }
- return val
-}
-
-// ApplyFilter applies a filter to a given value using the given parameters.
-// Returns a *pongo2.Value or an error.
-func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
- fn, existing := filters[name]
- if !existing {
- return nil, &Error{
- Sender: "applyfilter",
- OrigError: fmt.Errorf("Filter with name '%s' not found.", name),
- }
- }
-
- // Make sure param is a *Value
- if param == nil {
- param = AsValue(nil)
- }
-
- return fn(value, param)
-}
-
-type filterCall struct {
- token *Token
-
- name string
- parameter IEvaluator
-
- filterFunc FilterFunction
-}
-
-func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
- var param *Value
- var err *Error
-
- if fc.parameter != nil {
- param, err = fc.parameter.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- } else {
- param = AsValue(nil)
- }
-
- filteredValue, err := fc.filterFunc(v, param)
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
- }
- return filteredValue, nil
-}
-
-// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
-func (p *Parser) parseFilter() (*filterCall, *Error) {
- identToken := p.MatchType(TokenIdentifier)
-
- // Check filter ident
- if identToken == nil {
- return nil, p.Error("Filter name must be an identifier.", nil)
- }
-
- filter := &filterCall{
- token: identToken,
- name: identToken.Val,
- }
-
- // Get the appropriate filter function and bind it
- filterFn, exists := filters[identToken.Val]
- if !exists {
- return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
- }
-
- filter.filterFunc = filterFn
-
- // Check for filter-argument (2 tokens needed: ':' ARG)
- if p.Match(TokenSymbol, ":") != nil {
- if p.Peek(TokenSymbol, "}}") != nil {
- return nil, p.Error("Filter parameter required after ':'.", nil)
- }
-
- // Get filter argument expression
- v, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filter.parameter = v
- }
-
- return filter, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go b/vendor/github.com/flosch/pongo2/filters_builtin.go
deleted file mode 100644
index c0ec6161..00000000
--- a/vendor/github.com/flosch/pongo2/filters_builtin.go
+++ /dev/null
@@ -1,927 +0,0 @@
-package pongo2
-
-/* Filters that are provided through github.com/flosch/pongo2-addons:
- ------------------------------------------------------------------
-
- filesizeformat
- slugify
- timesince
- timeuntil
-
- Filters that won't be added:
- ----------------------------
-
- get_static_prefix (reason: web-framework specific)
- pprint (reason: python-specific)
- static (reason: web-framework specific)
-
- Reconsideration (not implemented yet):
- --------------------------------------
-
- force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
- safeseq (reason: same reason as `force_escape`)
- unordered_list (python-specific; not sure whether needed or not)
- dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
- dictsortreversed (see dictsort)
-*/
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "errors"
-)
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterFilter("escape", filterEscape)
- RegisterFilter("safe", filterSafe)
- RegisterFilter("escapejs", filterEscapejs)
-
- RegisterFilter("add", filterAdd)
- RegisterFilter("addslashes", filterAddslashes)
- RegisterFilter("capfirst", filterCapfirst)
- RegisterFilter("center", filterCenter)
- RegisterFilter("cut", filterCut)
- RegisterFilter("date", filterDate)
- RegisterFilter("default", filterDefault)
- RegisterFilter("default_if_none", filterDefaultIfNone)
- RegisterFilter("divisibleby", filterDivisibleby)
- RegisterFilter("first", filterFirst)
- RegisterFilter("floatformat", filterFloatformat)
- RegisterFilter("get_digit", filterGetdigit)
- RegisterFilter("iriencode", filterIriencode)
- RegisterFilter("join", filterJoin)
- RegisterFilter("last", filterLast)
- RegisterFilter("length", filterLength)
- RegisterFilter("length_is", filterLengthis)
- RegisterFilter("linebreaks", filterLinebreaks)
- RegisterFilter("linebreaksbr", filterLinebreaksbr)
- RegisterFilter("linenumbers", filterLinenumbers)
- RegisterFilter("ljust", filterLjust)
- RegisterFilter("lower", filterLower)
- RegisterFilter("make_list", filterMakelist)
- RegisterFilter("phone2numeric", filterPhone2numeric)
- RegisterFilter("pluralize", filterPluralize)
- RegisterFilter("random", filterRandom)
- RegisterFilter("removetags", filterRemovetags)
- RegisterFilter("rjust", filterRjust)
- RegisterFilter("slice", filterSlice)
- RegisterFilter("split", filterSplit)
- RegisterFilter("stringformat", filterStringformat)
- RegisterFilter("striptags", filterStriptags)
- RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
- RegisterFilter("title", filterTitle)
- RegisterFilter("truncatechars", filterTruncatechars)
- RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
- RegisterFilter("truncatewords", filterTruncatewords)
- RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
- RegisterFilter("upper", filterUpper)
- RegisterFilter("urlencode", filterUrlencode)
- RegisterFilter("urlize", filterUrlize)
- RegisterFilter("urlizetrunc", filterUrlizetrunc)
- RegisterFilter("wordcount", filterWordcount)
- RegisterFilter("wordwrap", filterWordwrap)
- RegisterFilter("yesno", filterYesno)
-
- RegisterFilter("float", filterFloat) // pongo-specific
- RegisterFilter("integer", filterInteger) // pongo-specific
-}
-
-func filterTruncatecharsHelper(s string, newLen int) string {
- runes := []rune(s)
- if newLen < len(runes) {
- if newLen >= 3 {
- return fmt.Sprintf("%s...", string(runes[:newLen-3]))
- }
- // Not enough space for the ellipsis
- return string(runes[:newLen])
- }
- return string(runes)
-}
-
-func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
- vLen := len(value)
- var tagStack []string
- idx := 0
-
- for idx < vLen && !cond() {
- c, s := utf8.DecodeRuneInString(value[idx:])
- if c == utf8.RuneError {
- idx += s
- continue
- }
-
- if c == '<' {
- newOutput.WriteRune(c)
- idx += s // consume "<"
-
- if idx+1 < vLen {
- if value[idx] == '/' {
- // Close tag
-
- newOutput.WriteString("/")
-
- tag := ""
- idx++ // consume "/"
-
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
- tag += string(c2)
- idx += size2
- }
-
- if len(tagStack) > 0 {
- // Ideally, the close tag is TOP of tag stack
- // In malformed HTML, it must not be, so iterate through the stack and remove the tag
- for i := len(tagStack) - 1; i >= 0; i-- {
- if tagStack[i] == tag {
- // Found the tag
- tagStack[i] = tagStack[len(tagStack)-1]
- tagStack = tagStack[:len(tagStack)-1]
- break
- }
- }
- }
-
- newOutput.WriteString(tag)
- newOutput.WriteString(">")
- } else {
- // Open tag
-
- tag := ""
-
- params := false
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- newOutput.WriteRune(c2)
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
-
- if !params {
- if c2 == ' ' {
- params = true
- } else {
- tag += string(c2)
- }
- }
-
- idx += size2
- }
-
- // Add tag to stack
- tagStack = append(tagStack, tag)
- }
- }
- } else {
- idx = fn(c, s, idx)
- }
- }
-
- finalize()
-
- for i := len(tagStack) - 1; i >= 0; i-- {
- tag := tagStack[i]
- // Close everything from the regular tag stack
- newOutput.WriteString(fmt.Sprintf("%s>", tag))
- }
-}
-
-func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- newLen := param.Integer()
- return AsValue(filterTruncatecharsHelper(s, newLen)), nil
-}
-
-func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer()-3, 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- textcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return textcounter >= newLen
- }, func(c rune, s int, idx int) int {
- textcounter++
- newOutput.WriteRune(c)
-
- return idx + s
- }, func() {
- if textcounter >= newLen && textcounter < len(value) {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- n := param.Integer()
- if n <= 0 {
- return AsValue(""), nil
- }
- nlen := min(len(words), n)
- out := make([]string, 0, nlen)
- for i := 0; i < nlen; i++ {
- out = append(out, words[i])
- }
-
- if n < len(words) {
- out = append(out, "...")
- }
-
- return AsValue(strings.Join(out, " ")), nil
-}
-
-func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer(), 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- wordcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return wordcounter >= newLen
- }, func(_ rune, _ int, idx int) int {
- // Get next word
- wordFound := false
-
- for idx < len(value) {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- if c2 == '<' {
- // HTML tag start, don't consume it
- return idx
- }
-
- newOutput.WriteRune(c2)
- idx += size2
-
- if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
- // Word ends here, stop capturing it now
- break
- } else {
- wordFound = true
- }
- }
-
- if wordFound {
- wordcounter++
- }
-
- return idx
- }, func() {
- if wordcounter >= newLen {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterEscape(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "&", "&", -1)
- output = strings.Replace(output, ">", ">", -1)
- output = strings.Replace(output, "<", "<", -1)
- output = strings.Replace(output, "\"", """, -1)
- output = strings.Replace(output, "'", "'", -1)
- return AsValue(output), nil
-}
-
-func filterSafe(in *Value, param *Value) (*Value, *Error) {
- return in, nil // nothing to do here, just to keep track of the safe application
-}
-
-func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
-
- var b bytes.Buffer
-
- idx := 0
- for idx < len(sin) {
- c, size := utf8.DecodeRuneInString(sin[idx:])
- if c == utf8.RuneError {
- idx += size
- continue
- }
-
- if c == '\\' {
- // Escape seq?
- if idx+1 < len(sin) {
- switch sin[idx+1] {
- case 'r':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
- idx += 2
- continue
- case 'n':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
- idx += 2
- continue
- /*case '\'':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
- idx += 2
- continue
- case '"':
- b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
- idx += 2
- continue*/
- }
- }
- }
-
- if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
- b.WriteRune(c)
- } else {
- b.WriteString(fmt.Sprintf(`\u%04X`, c))
- }
-
- idx += size
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterAdd(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() && param.IsNumber() {
- if in.IsFloat() || param.IsFloat() {
- return AsValue(in.Float() + param.Float()), nil
- }
- return AsValue(in.Integer() + param.Integer()), nil
- }
- // If in/param is not a number, we're relying on the
- // Value's String() conversion and just add them both together
- return AsValue(in.String() + param.String()), nil
-}
-
-func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "\\", "\\\\", -1)
- output = strings.Replace(output, "\"", "\\\"", -1)
- output = strings.Replace(output, "'", "\\'", -1)
- return AsValue(output), nil
-}
-
-func filterCut(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
-}
-
-func filterLength(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len()), nil
-}
-
-func filterLengthis(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len() == param.Integer()), nil
-}
-
-func filterDefault(in *Value, param *Value) (*Value, *Error) {
- if !in.IsTrue() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
- if in.IsNil() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
- if param.Integer() == 0 {
- return AsValue(false), nil
- }
- return AsValue(in.Integer()%param.Integer() == 0), nil
-}
-
-func filterFirst(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(0), nil
- }
- return AsValue(""), nil
-}
-
-func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
- val := in.Float()
-
- decimals := -1
- if !param.IsNil() {
- // Any argument provided?
- decimals = param.Integer()
- }
-
- // if the argument is not a number (e. g. empty), the default
- // behaviour is trim the result
- trim := !param.IsNumber()
-
- if decimals <= 0 {
- // argument is negative or zero, so we
- // want the output being trimmed
- decimals = -decimals
- trim = true
- }
-
- if trim {
- // Remove zeroes
- if float64(int(val)) == val {
- return AsValue(in.Integer()), nil
- }
- }
-
- return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
-}
-
-func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
- i := param.Integer()
- l := len(in.String()) // do NOT use in.Len() here!
- if i <= 0 || i > l {
- return in, nil
- }
- return AsValue(in.String()[l-i] - 48), nil
-}
-
-const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
-
-func filterIriencode(in *Value, param *Value) (*Value, *Error) {
- var b bytes.Buffer
-
- sin := in.String()
- for _, r := range sin {
- if strings.IndexRune(filterIRIChars, r) >= 0 {
- b.WriteRune(r)
- } else {
- b.WriteString(url.QueryEscape(string(r)))
- }
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterJoin(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() {
- return in, nil
- }
- sep := param.String()
- sl := make([]string, 0, in.Len())
- for i := 0; i < in.Len(); i++ {
- sl = append(sl, in.Index(i).String())
- }
- return AsValue(strings.Join(sl, sep)), nil
-}
-
-func filterLast(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(in.Len() - 1), nil
- }
- return AsValue(""), nil
-}
-
-func filterUpper(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToUpper(in.String())), nil
-}
-
-func filterLower(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToLower(in.String())), nil
-}
-
-func filterMakelist(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- result := make([]string, 0, len(s))
- for _, c := range s {
- result = append(result, string(c))
- }
- return AsValue(result), nil
-}
-
-func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
- if in.Len() <= 0 {
- return AsValue(""), nil
- }
- t := in.String()
- r, size := utf8.DecodeRuneInString(t)
- return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
-}
-
-func filterCenter(in *Value, param *Value) (*Value, *Error) {
- width := param.Integer()
- slen := in.Len()
- if width <= slen {
- return in, nil
- }
-
- spaces := width - slen
- left := spaces/2 + spaces%2
- right := spaces / 2
-
- return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
- in.String(), strings.Repeat(" ", right))), nil
-}
-
-func filterDate(in *Value, param *Value) (*Value, *Error) {
- t, isTime := in.Interface().(time.Time)
- if !isTime {
- return nil, &Error{
- Sender: "filter:date",
- OrigError: errors.New("filter input argument must be of type 'time.Time'"),
- }
- }
- return AsValue(t.Format(param.String())), nil
-}
-
-func filterFloat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Float()), nil
-}
-
-func filterInteger(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Integer()), nil
-}
-
-func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
- if in.Len() == 0 {
- return in, nil
- }
-
- var b bytes.Buffer
-
- // Newline =
- // Double newline = ...
- lines := strings.Split(in.String(), "\n")
- lenlines := len(lines)
-
- opened := false
-
- for idx, line := range lines {
-
- if !opened {
- b.WriteString("")
- opened = true
- }
-
- b.WriteString(line)
-
- if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
- // We've not reached the end
- if strings.TrimSpace(lines[idx+1]) == "" {
- // Next line is empty
- if opened {
- b.WriteString("
")
- opened = false
- }
- } else {
- b.WriteString(" ")
- }
- }
- }
-
- if opened {
- b.WriteString("
")
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterSplit(in *Value, param *Value) (*Value, *Error) {
- chunks := strings.Split(in.String(), param.String())
-
- return AsValue(chunks), nil
-}
-
-func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), "\n", " ", -1)), nil
-}
-
-func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
- lines := strings.Split(in.String(), "\n")
- output := make([]string, 0, len(lines))
- for idx, line := range lines {
- output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
- }
- return AsValue(strings.Join(output, "\n")), nil
-}
-
-func filterLjust(in *Value, param *Value) (*Value, *Error) {
- times := param.Integer() - in.Len()
- if times < 0 {
- times = 0
- }
- return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
-}
-
-func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
- return AsValue(url.QueryEscape(in.String())), nil
-}
-
-// TODO: This regexp could do some work
-var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
-var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
-
-func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
- var soutErr error
- sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
- var prefix string
- var suffix string
- if strings.HasPrefix(raw_url, " ") {
- prefix = " "
- }
- if strings.HasSuffix(raw_url, " ") {
- suffix = " "
- }
-
- raw_url = strings.TrimSpace(raw_url)
-
- t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- url := t.String()
-
- if !strings.HasPrefix(url, "http") {
- url = fmt.Sprintf("http://%s", url)
- }
-
- title := raw_url
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- if autoescape {
- t, err := ApplyFilter("escape", AsValue(title), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- title = t.String()
- }
-
- return fmt.Sprintf(`%s%s %s`, prefix, url, title, suffix)
- })
- if soutErr != nil {
- return "", soutErr
- }
-
- sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
- title := mail
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- return fmt.Sprintf(`%s `, mail, title)
- })
-
- return sout, nil
-}
-
-func filterUrlize(in *Value, param *Value) (*Value, *Error) {
- autoescape := true
- if param.IsBool() {
- autoescape = param.Bool()
- }
-
- s, err := filterUrlizeHelper(in.String(), autoescape, -1)
- if err != nil {
-
- }
-
- return AsValue(s), nil
-}
-
-func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
- s, err := filterUrlizeHelper(in.String(), true, param.Integer())
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlizetrunc",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- return AsValue(s), nil
-}
-
-func filterStringformat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
-}
-
-var reStriptags = regexp.MustCompile("<[^>]*?>")
-
-func filterStriptags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
-
- // Strip all tags
- s = reStriptags.ReplaceAllString(s, "")
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-// https://en.wikipedia.org/wiki/Phoneword
-var filterPhone2numericMap = map[string]string{
- "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
- "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
- "w": "9", "x": "9", "y": "9", "z": "9",
-}
-
-func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
- for k, v := range filterPhone2numericMap {
- sin = strings.Replace(sin, k, v, -1)
- sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
- }
- return AsValue(sin), nil
-}
-
-func filterPluralize(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() {
- // Works only on numbers
- if param.Len() > 0 {
- endings := strings.Split(param.String(), ",")
- if len(endings) > 2 {
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- if len(endings) == 1 {
- // 1 argument
- if in.Integer() != 1 {
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // 2 arguments
- return AsValue(endings[1]), nil
- }
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // return default 's'
- return AsValue("s"), nil
- }
- }
-
- return AsValue(""), nil
- }
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("filter 'pluralize' does only work on numbers"),
- }
-}
-
-func filterRandom(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() || in.Len() <= 0 {
- return in, nil
- }
- i := rand.Intn(in.Len())
- return in.Index(i), nil
-}
-
-func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- tags := strings.Split(param.String(), ",")
-
- // Strip only specific tags
- for _, tag := range tags {
- re := regexp.MustCompile(fmt.Sprintf("?%s/?>", tag))
- s = re.ReplaceAllString(s, "")
- }
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-func filterRjust(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
-}
-
-func filterSlice(in *Value, param *Value) (*Value, *Error) {
- comp := strings.Split(param.String(), ":")
- if len(comp) != 2 {
- return nil, &Error{
- Sender: "filter:slice",
- OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
- }
- }
-
- if !in.CanSlice() {
- return in, nil
- }
-
- from := AsValue(comp[0]).Integer()
- to := in.Len()
-
- if from > to {
- from = to
- }
-
- vto := AsValue(comp[1]).Integer()
- if vto >= from && vto <= in.Len() {
- to = vto
- }
-
- return in.Slice(from, to), nil
-}
-
-func filterTitle(in *Value, param *Value) (*Value, *Error) {
- if !in.IsString() {
- return AsValue(""), nil
- }
- return AsValue(strings.Title(strings.ToLower(in.String()))), nil
-}
-
-func filterWordcount(in *Value, param *Value) (*Value, *Error) {
- return AsValue(len(strings.Fields(in.String()))), nil
-}
-
-func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- wordsLen := len(words)
- wrapAt := param.Integer()
- if wrapAt <= 0 {
- return in, nil
- }
-
- linecount := wordsLen/wrapAt + wordsLen%wrapAt
- lines := make([]string, 0, linecount)
- for i := 0; i < linecount; i++ {
- lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
- }
- return AsValue(strings.Join(lines, "\n")), nil
-}
-
-func filterYesno(in *Value, param *Value) (*Value, *Error) {
- choices := map[int]string{
- 0: "yes",
- 1: "no",
- 2: "maybe",
- }
- paramString := param.String()
- customChoices := strings.Split(paramString, ",")
- if len(paramString) > 0 {
- if len(customChoices) > 3 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
- if len(customChoices) < 2 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
-
- // Map to the options now
- choices[0] = customChoices[0]
- choices[1] = customChoices[1]
- if len(customChoices) == 3 {
- choices[2] = customChoices[2]
- }
- }
-
- // maybe
- if in.IsNil() {
- return AsValue(choices[2]), nil
- }
-
- // yes
- if in.IsTrue() {
- return AsValue(choices[0]), nil
- }
-
- // no
- return AsValue(choices[1]), nil
-}
diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go
deleted file mode 100644
index 880dbc04..00000000
--- a/vendor/github.com/flosch/pongo2/helpers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pongo2
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go
deleted file mode 100644
index f1897984..00000000
--- a/vendor/github.com/flosch/pongo2/lexer.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-
- "errors"
-)
-
-const (
- TokenError = iota
- EOF
-
- TokenHTML
-
- TokenKeyword
- TokenIdentifier
- TokenString
- TokenNumber
- TokenSymbol
-)
-
-var (
- tokenSpaceChars = " \n\r\t"
- tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
- tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
- tokenDigits = "0123456789"
-
- // Available symbols in pongo2 (within filters/tag)
- TokenSymbols = []string{
- // 3-Char symbols
- "{{-", "-}}", "{%-", "-%}",
-
- // 2-Char symbols
- "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
-
- // 1-Char symbol
- "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
- }
-
- // Available keywords in pongo2
- TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
-)
-
-type TokenType int
-type Token struct {
- Filename string
- Typ TokenType
- Val string
- Line int
- Col int
- TrimWhitespaces bool
-}
-
-type lexerStateFn func() lexerStateFn
-type lexer struct {
- name string
- input string
- start int // start pos of the item
- pos int // current pos
- width int // width of last rune
- tokens []*Token
- errored bool
- startline int
- startcol int
- line int
- col int
-
- inVerbatim bool
- verbatimName string
-}
-
-func (t *Token) String() string {
- val := t.Val
- if len(val) > 1000 {
- val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:])
- }
-
- typ := ""
- switch t.Typ {
- case TokenHTML:
- typ = "HTML"
- case TokenError:
- typ = "Error"
- case TokenIdentifier:
- typ = "Identifier"
- case TokenKeyword:
- typ = "Keyword"
- case TokenNumber:
- typ = "Number"
- case TokenString:
- typ = "String"
- case TokenSymbol:
- typ = "Symbol"
- default:
- typ = "Unknown"
- }
-
- return fmt.Sprintf("",
- typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
-}
-
-func lex(name string, input string) ([]*Token, *Error) {
- l := &lexer{
- name: name,
- input: input,
- tokens: make([]*Token, 0, 100),
- line: 1,
- col: 1,
- startline: 1,
- startcol: 1,
- }
- l.run()
- if l.errored {
- errtoken := l.tokens[len(l.tokens)-1]
- return nil, &Error{
- Filename: name,
- Line: errtoken.Line,
- Column: errtoken.Col,
- Sender: "lexer",
- OrigError: errors.New(errtoken.Val),
- }
- }
- return l.tokens, nil
-}
-
-func (l *lexer) value() string {
- return l.input[l.start:l.pos]
-}
-
-func (l *lexer) length() int {
- return l.pos - l.start
-}
-
-func (l *lexer) emit(t TokenType) {
- tok := &Token{
- Filename: l.name,
- Typ: t,
- Val: l.value(),
- Line: l.startline,
- Col: l.startcol,
- }
-
- if t == TokenString {
- // Escape sequence \" in strings
- tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
- tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
- }
-
- if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
- tok.TrimWhitespaces = true
- tok.Val = strings.Replace(tok.Val, "-", "", -1)
- }
-
- l.tokens = append(l.tokens, tok)
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) next() rune {
- if l.pos >= len(l.input) {
- l.width = 0
- return EOF
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = w
- l.pos += l.width
- l.col += l.width
- return r
-}
-
-func (l *lexer) backup() {
- l.pos -= l.width
- l.col -= l.width
-}
-
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func (l *lexer) ignore() {
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) accept(what string) bool {
- if strings.IndexRune(what, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *lexer) acceptRun(what string) {
- for strings.IndexRune(what, l.next()) >= 0 {
- }
- l.backup()
-}
-
-func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
- t := &Token{
- Filename: l.name,
- Typ: TokenError,
- Val: fmt.Sprintf(format, args...),
- Line: l.startline,
- Col: l.startcol,
- }
- l.tokens = append(l.tokens, t)
- l.errored = true
- l.startline = l.line
- l.startcol = l.col
- return nil
-}
-
-func (l *lexer) eof() bool {
- return l.start >= len(l.input)-1
-}
-
-func (l *lexer) run() {
- for {
- // TODO: Support verbatim tag names
- // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
- if l.inVerbatim {
- name := l.verbatimName
- if name != "" {
- name += " "
- }
- if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- w := len("{% endverbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- l.inVerbatim = false
- }
- } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.inVerbatim = true
- w := len("{% verbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- }
-
- if !l.inVerbatim {
- // Ignore single-line comments {# ... #}
- if strings.HasPrefix(l.input[l.pos:], "{#") {
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- l.pos += 2 // pass '{#'
- l.col += 2
-
- for {
- switch l.peek() {
- case EOF:
- l.errorf("Single-line comment not closed.")
- return
- case '\n':
- l.errorf("Newline not permitted in a single-line comment.")
- return
- }
-
- if strings.HasPrefix(l.input[l.pos:], "#}") {
- l.pos += 2 // pass '#}'
- l.col += 2
- break
- }
-
- l.next()
- }
- l.ignore() // ignore whole comment
-
- // Comment skipped
- continue // next token
- }
-
- if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
- strings.HasPrefix(l.input[l.pos:], "{%") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.tokenize()
- if l.errored {
- return
- }
- continue
- }
- }
-
- switch l.peek() {
- case '\n':
- l.line++
- l.col = 0
- }
- if l.next() == EOF {
- break
- }
- }
-
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- if l.inVerbatim {
- l.errorf("verbatim-tag not closed, got EOF.")
- }
-}
-
-func (l *lexer) tokenize() {
- for state := l.stateCode; state != nil; {
- state = state()
- }
-}
-
-func (l *lexer) stateCode() lexerStateFn {
-outer_loop:
- for {
- switch {
- case l.accept(tokenSpaceChars):
- if l.value() == "\n" {
- return l.errorf("Newline not allowed within tag/variable.")
- }
- l.ignore()
- continue
- case l.accept(tokenIdentifierChars):
- return l.stateIdentifier
- case l.accept(tokenDigits):
- return l.stateNumber
- case l.accept(`"'`):
- return l.stateString
- }
-
- // Check for symbol
- for _, sym := range TokenSymbols {
- if strings.HasPrefix(l.input[l.start:], sym) {
- l.pos += len(sym)
- l.col += l.length()
- l.emit(TokenSymbol)
-
- if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
- // Tag/variable end, return after emit
- return nil
- }
-
- continue outer_loop
- }
- }
-
- break
- }
-
- // Normal shut down
- return nil
-}
-
-func (l *lexer) stateIdentifier() lexerStateFn {
- l.acceptRun(tokenIdentifierChars)
- l.acceptRun(tokenIdentifierCharsWithDigits)
- for _, kw := range TokenKeywords {
- if kw == l.value() {
- l.emit(TokenKeyword)
- return l.stateCode
- }
- }
- l.emit(TokenIdentifier)
- return l.stateCode
-}
-
-func (l *lexer) stateNumber() lexerStateFn {
- l.acceptRun(tokenDigits)
- if l.accept(tokenIdentifierCharsWithDigits) {
- // This seems to be an identifier starting with a number.
- // See https://github.com/flosch/pongo2/issues/151
- return l.stateIdentifier()
- }
- /*
- Maybe context-sensitive number lexing?
- * comments.0.Text // first comment
- * usercomments.1.0 // second user, first comment
- * if (score >= 8.5) // 8.5 as a number
-
- if l.peek() == '.' {
- l.accept(".")
- if !l.accept(tokenDigits) {
- return l.errorf("Malformed number.")
- }
- l.acceptRun(tokenDigits)
- }
- */
- l.emit(TokenNumber)
- return l.stateCode
-}
-
-func (l *lexer) stateString() lexerStateFn {
- quotationMark := l.value()
- l.ignore()
- l.startcol-- // we're starting the position at the first "
- for !l.accept(quotationMark) {
- switch l.next() {
- case '\\':
- // escape sequence
- switch l.peek() {
- case '"', '\\':
- l.next()
- default:
- return l.errorf("Unknown escape sequence: \\%c", l.peek())
- }
- case EOF:
- return l.errorf("Unexpected EOF, string not closed.")
- case '\n':
- return l.errorf("Newline in string is not allowed.")
- }
- }
- l.backup()
- l.emit(TokenString)
-
- l.next()
- l.ignore()
-
- return l.stateCode
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go
deleted file mode 100644
index 5b039cdf..00000000
--- a/vendor/github.com/flosch/pongo2/nodes.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-// The root document
-type nodeDocument struct {
- Nodes []INode
-}
-
-func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range doc.Nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go
deleted file mode 100644
index b980a3a5..00000000
--- a/vendor/github.com/flosch/pongo2/nodes_html.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pongo2
-
-import (
- "strings"
-)
-
-type nodeHTML struct {
- token *Token
- trimLeft bool
- trimRight bool
-}
-
-func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- res := n.token.Val
- if n.trimLeft {
- res = strings.TrimLeft(res, tokenSpaceChars)
- }
- if n.trimRight {
- res = strings.TrimRight(res, tokenSpaceChars)
- }
- writer.WriteString(res)
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
deleted file mode 100644
index d1bcb8d8..00000000
--- a/vendor/github.com/flosch/pongo2/nodes_wrapper.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-type NodeWrapper struct {
- Endtag string
- nodes []INode
-}
-
-func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range wrapper.nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go
deleted file mode 100644
index 9c39e467..00000000
--- a/vendor/github.com/flosch/pongo2/options.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package pongo2
-
-// Options allow you to change the behavior of template-engine.
-// You can change the options before calling the Execute method.
-type Options struct {
- // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false.
- TrimBlocks bool
-
- // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. Defaults to false
- LStripBlocks bool
-}
-
-func newOptions() *Options {
- return &Options{
- TrimBlocks: false,
- LStripBlocks: false,
- }
-}
-
-// Update updates this options from another options.
-func (opt *Options) Update(other *Options) *Options {
- opt.TrimBlocks = other.TrimBlocks
- opt.LStripBlocks = other.LStripBlocks
-
- return opt
-}
diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go
deleted file mode 100644
index 19553f17..00000000
--- a/vendor/github.com/flosch/pongo2/parser.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
-
- "errors"
-)
-
-type INode interface {
- Execute(*ExecutionContext, TemplateWriter) *Error
-}
-
-type IEvaluator interface {
- INode
- GetPositionToken() *Token
- Evaluate(*ExecutionContext) (*Value, *Error)
- FilterApplied(name string) bool
-}
-
-// The parser provides you a comprehensive and easy tool to
-// work with the template document and arguments provided by
-// the user for your custom tag.
-//
-// The parser works on a token list which will be provided by pongo2.
-// A token is a unit you can work with. Tokens are either of type identifier,
-// string, number, keyword, HTML or symbol.
-//
-// (See Token's documentation for more about tokens)
-type Parser struct {
- name string
- idx int
- tokens []*Token
- lastToken *Token
-
- // if the parser parses a template document, here will be
- // a reference to it (needed to access the template through Tags)
- template *Template
-}
-
-// Creates a new parser to parse tokens.
-// Used inside pongo2 to parse documents and to provide an easy-to-use
-// parser for tag authors
-func newParser(name string, tokens []*Token, template *Template) *Parser {
- p := &Parser{
- name: name,
- tokens: tokens,
- template: template,
- }
- if len(tokens) > 0 {
- p.lastToken = tokens[len(tokens)-1]
- }
- return p
-}
-
-// Consume one token. It will be gone forever.
-func (p *Parser) Consume() {
- p.ConsumeN(1)
-}
-
-// Consume N tokens. They will be gone forever.
-func (p *Parser) ConsumeN(count int) {
- p.idx += count
-}
-
-// Returns the current token.
-func (p *Parser) Current() *Token {
- return p.Get(p.idx)
-}
-
-// Returns the CURRENT token if the given type matches.
-// Consumes this token on success.
-func (p *Parser) MatchType(typ TokenType) *Token {
- if t := p.PeekType(typ); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// Consumes this token on success.
-func (p *Parser) Match(typ TokenType, val string) *Token {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// Consumes this token on success.
-func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
- for _, val := range vals {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekType(typ TokenType) *Token {
- return p.PeekTypeN(0, typ)
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// It DOES NOT consume the token.
-func (p *Parser) Peek(typ TokenType, val string) *Token {
- return p.PeekN(0, typ, val)
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
- for _, v := range vals {
- t := p.PeekN(0, typ, v)
- if t != nil {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the
-// given type AND value matches for that token.
-// DOES NOT consume the token.
-func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ && t.Val == val {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the given type matches.
-// DOES NOT consume the token for that token.
-func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ {
- return t
- }
- }
- return nil
-}
-
-// Returns the UNCONSUMED token count.
-func (p *Parser) Remaining() int {
- return len(p.tokens) - p.idx
-}
-
-// Returns the total token count.
-func (p *Parser) Count() int {
- return len(p.tokens)
-}
-
-// Returns tokens[i] or NIL (if i >= len(tokens))
-func (p *Parser) Get(i int) *Token {
- if i < len(p.tokens) && i >= 0 {
- return p.tokens[i]
- }
- return nil
-}
-
-// Returns tokens[current-position + shift] or NIL
-// (if (current-position + i) >= len(tokens))
-func (p *Parser) GetR(shift int) *Token {
- i := p.idx + shift
- return p.Get(i)
-}
-
-// Error produces a nice error message and returns an error-object.
-// The 'token'-argument is optional. If provided, it will take
-// the token's position information. If not provided, it will
-// automatically use the CURRENT token's position information.
-func (p *Parser) Error(msg string, token *Token) *Error {
- if token == nil {
- // Set current token
- token = p.Current()
- if token == nil {
- // Set to last token
- if len(p.tokens) > 0 {
- token = p.tokens[len(p.tokens)-1]
- }
- }
- }
- var line, col int
- if token != nil {
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: p.template,
- Filename: p.name,
- Sender: "parser",
- Line: line,
- Column: col,
- Token: token,
- OrigError: errors.New(msg),
- }
-}
-
-// Wraps all nodes between starting tag and "{% endtag %}" and provides
-// one simple interface to execute the wrapped nodes.
-// It returns a parser to process provided arguments to the tag.
-func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
- wrapper := &NodeWrapper{}
-
- var tagArgs []*Token
-
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Okay, end the wrapping here
- wrapper.Endtag = tagIdent.Val
- return wrapper, newParser(p.template.name, tagArgs, p.template), nil
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
- }
- tagArgs = append(tagArgs, t)
- }
- }
- }
-
- }
-
- // Otherwise process next element to be wrapped
- node, err := p.parseDocElement()
- if err != nil {
- return nil, nil, err
- }
- wrapper.nodes = append(wrapper.nodes, node)
- }
-
- return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
- p.lastToken)
-}
-
-// Skips all nodes between starting tag and "{% endtag %}"
-func (p *Parser) SkipUntilTag(names ...string) *Error {
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Done skipping, exit.
- return nil
- }
- }
- }
- }
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return p.Error("Unexpected EOF.", p.lastToken)
- }
- }
-
- return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
-}
diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go
deleted file mode 100644
index e3ac2c8e..00000000
--- a/vendor/github.com/flosch/pongo2/parser_document.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package pongo2
-
-// Doc = { ( Filter | Tag | HTML ) }
-func (p *Parser) parseDocElement() (INode, *Error) {
- t := p.Current()
-
- switch t.Typ {
- case TokenHTML:
- n := &nodeHTML{token: t}
- left := p.PeekTypeN(-1, TokenSymbol)
- right := p.PeekTypeN(1, TokenSymbol)
- n.trimLeft = left != nil && left.TrimWhitespaces
- n.trimRight = right != nil && right.TrimWhitespaces
- p.Consume() // consume HTML element
- return n, nil
- case TokenSymbol:
- switch t.Val {
- case "{{":
- // parse variable
- variable, err := p.parseVariableElement()
- if err != nil {
- return nil, err
- }
- return variable, nil
- case "{%":
- // parse tag
- tag, err := p.parseTagElement()
- if err != nil {
- return nil, err
- }
- return tag, nil
- }
- }
- return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
-}
-
-func (tpl *Template) parse() *Error {
- tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
- doc, err := tpl.parser.parseDocument()
- if err != nil {
- return err
- }
- tpl.root = doc
- return nil
-}
-
-func (p *Parser) parseDocument() (*nodeDocument, *Error) {
- doc := &nodeDocument{}
-
- for p.Remaining() > 0 {
- node, err := p.parseDocElement()
- if err != nil {
- return nil, err
- }
- doc.Nodes = append(doc.Nodes, node)
- }
-
- return doc, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go
deleted file mode 100644
index 215b0afb..00000000
--- a/vendor/github.com/flosch/pongo2/parser_expression.go
+++ /dev/null
@@ -1,517 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type Expression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type relationalExpression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type simpleExpression struct {
- negate bool
- negativeSign bool
- term1 IEvaluator
- term2 IEvaluator
- opToken *Token
-}
-
-type term struct {
- // TODO: Add location token?
- factor1 IEvaluator
- factor2 IEvaluator
- opToken *Token
-}
-
-type power struct {
- // TODO: Add location token?
- power1 IEvaluator
- power2 IEvaluator
-}
-
-func (expr *Expression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *relationalExpression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *simpleExpression) FilterApplied(name string) bool {
- return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
- (expr.term2 != nil && expr.term2.FilterApplied(name)))
-}
-
-func (expr *term) FilterApplied(name string) bool {
- return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
- (expr.factor2 != nil && expr.factor2.FilterApplied(name)))
-}
-
-func (expr *power) FilterApplied(name string) bool {
- return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
- (expr.power2 != nil && expr.power2.FilterApplied(name)))
-}
-
-func (expr *Expression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *relationalExpression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *simpleExpression) GetPositionToken() *Token {
- return expr.term1.GetPositionToken()
-}
-
-func (expr *term) GetPositionToken() *Token {
- return expr.factor1.GetPositionToken()
-}
-
-func (expr *power) GetPositionToken() *Token {
- return expr.power1.GetPositionToken()
-}
-
-func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- switch expr.opToken.Val {
- case "and", "&&":
- if !v1.IsTrue() {
- return AsValue(false), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- case "or", "||":
- if v1.IsTrue() {
- return AsValue(true), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "<=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() <= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.Before(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() <= v2.Integer()), nil
- case ">=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() >= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.After(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() >= v2.Integer()), nil
- case "==":
- return AsValue(v1.EqualValueTo(v2)), nil
- case ">":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() > v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().After(v2.Time())), nil
- }
- return AsValue(v1.Integer() > v2.Integer()), nil
- case "<":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() < v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().Before(v2.Time())), nil
- }
- return AsValue(v1.Integer() < v2.Integer()), nil
- case "!=", "<>":
- return AsValue(!v1.EqualValueTo(v2)), nil
- case "in":
- return AsValue(v2.Contains(v1)), nil
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- t1, err := expr.term1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- result := t1
-
- if expr.negate {
- result = result.Negate()
- }
-
- if expr.negativeSign {
- if result.IsNumber() {
- switch {
- case result.IsFloat():
- result = AsValue(-1 * result.Float())
- case result.IsInteger():
- result = AsValue(-1 * result.Integer())
- default:
- return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
- }
- } else {
- return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
- }
- }
-
- if expr.term2 != nil {
- t2, err := expr.term2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "+":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() + t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() + t2.Integer()), nil
- case "-":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() - t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() - t2.Integer()), nil
- default:
- return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
- }
- }
-
- return result, nil
-}
-
-func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- f1, err := expr.factor1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.factor2 != nil {
- f2, err := expr.factor2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "*":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() * f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() * f2.Integer()), nil
- case "/":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() / f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() / f2.Integer()), nil
- case "%":
- // Result will be int
- return AsValue(f1.Integer() % f2.Integer()), nil
- default:
- return nil, ctx.Error("unimplemented", expr.opToken)
- }
- } else {
- return f1, nil
- }
-}
-
-func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- p1, err := expr.power1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.power2 != nil {
- p2, err := expr.power2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(math.Pow(p1.Float(), p2.Float())), nil
- }
- return p1, nil
-}
-
-func (p *Parser) parseFactor() (IEvaluator, *Error) {
- if p.Match(TokenSymbol, "(") != nil {
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- if p.Match(TokenSymbol, ")") == nil {
- return nil, p.Error("Closing bracket expected after expression", nil)
- }
- return expr, nil
- }
-
- return p.parseVariableOrLiteralWithFilter()
-}
-
-func (p *Parser) parsePower() (IEvaluator, *Error) {
- pw := new(power)
-
- power1, err := p.parseFactor()
- if err != nil {
- return nil, err
- }
- pw.power1 = power1
-
- if p.Match(TokenSymbol, "^") != nil {
- power2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- pw.power2 = power2
- }
-
- if pw.power2 == nil {
- // Shortcut for faster evaluation
- return pw.power1, nil
- }
-
- return pw, nil
-}
-
-func (p *Parser) parseTerm() (IEvaluator, *Error) {
- returnTerm := new(term)
-
- factor1, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- returnTerm.factor1 = factor1
-
- for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
- if returnTerm.opToken != nil {
- // Create new sub-term
- returnTerm = &term{
- factor1: returnTerm,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- factor2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
-
- returnTerm.opToken = op
- returnTerm.factor2 = factor2
- }
-
- if returnTerm.opToken == nil {
- // Shortcut for faster evaluation
- return returnTerm.factor1, nil
- }
-
- return returnTerm, nil
-}
-
-func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
- expr := new(simpleExpression)
-
- if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
- if sign.Val == "-" {
- expr.negativeSign = true
- }
- }
-
- if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
- expr.negate = true
- }
-
- term1, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
- expr.term1 = term1
-
- for p.PeekOne(TokenSymbol, "+", "-") != nil {
- if expr.opToken != nil {
- // New sub expr
- expr = &simpleExpression{
- term1: expr,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- term2, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
-
- expr.term2 = term2
- expr.opToken = op
- }
-
- if expr.negate == false && expr.negativeSign == false && expr.term2 == nil {
- // Shortcut for faster evaluation
- return expr.term1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
- expr1, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
-
- expr := &relationalExpression{
- expr1: expr1,
- }
-
- if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
- expr2, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
- expr2, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- }
-
- if expr.expr2 == nil {
- // Shortcut for faster evaluation
- return expr.expr1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) ParseExpression() (IEvaluator, *Error) {
- rexpr1, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
-
- exp := &Expression{
- expr1: rexpr1,
- }
-
- if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
- op := p.Current()
- p.Consume()
- expr2, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- exp.expr2 = expr2
- exp.opToken = op
- }
-
- if exp.expr2 == nil {
- // Shortcut for faster evaluation
- return exp.expr1, nil
- }
-
- return exp, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go
deleted file mode 100644
index eda3aa07..00000000
--- a/vendor/github.com/flosch/pongo2/pongo2.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package pongo2
-
-// Version string
-const Version = "dev"
-
-// Must panics, if a Template couldn't successfully parsed. This is how you
-// would use it:
-// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
-func Must(tpl *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return tpl
-}
diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go
deleted file mode 100644
index 710ee252..00000000
--- a/vendor/github.com/flosch/pongo2/tags.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package pongo2
-
-/* Incomplete:
- -----------
-
- verbatim (only the "name" argument is missing for verbatim)
-
- Reconsideration:
- ----------------
-
- debug (reason: not sure what to output yet)
- regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
-
- Following built-in tags wont be added:
- --------------------------------------
-
- csrf_token (reason: web-framework specific)
- load (reason: python-specific)
- url (reason: web-framework specific)
-*/
-
-import (
- "fmt"
-)
-
-type INodeTag interface {
- INode
-}
-
-// This is the function signature of the tag's parser you will have
-// to implement in order to create a new tag.
-//
-// 'doc' is providing access to the whole document while 'arguments'
-// is providing access to the user's arguments to the tag:
-//
-// {% your_tag_name some "arguments" 123 %}
-//
-// start_token will be the *Token with the tag's name in it (here: your_tag_name).
-//
-// Please see the Parser documentation on how to use the parser.
-// See RegisterTag()'s documentation for more information about
-// writing a tag as well.
-type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
-
-type tag struct {
- name string
- parser TagParser
-}
-
-var tags map[string]*tag
-
-func init() {
- tags = make(map[string]*tag)
-}
-
-// Registers a new tag. You usually want to call this
-// function in the tag's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
-func RegisterTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if existing {
- return fmt.Errorf("tag with name '%s' is already registered", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Replaces an already registered tag with a new implementation. Use this
-// function with caution since it allows you to change existing tag behaviour.
-func ReplaceTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if !existing {
- return fmt.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Tag = "{%" IDENT ARGS "%}"
-func (p *Parser) parseTagElement() (INodeTag, *Error) {
- p.Consume() // consume "{%"
- tokenName := p.MatchType(TokenIdentifier)
-
- // Check for identifier
- if tokenName == nil {
- return nil, p.Error("Tag name must be an identifier.", nil)
- }
-
- // Check for the existing tag
- tag, exists := tags[tokenName.Val]
- if !exists {
- // Does not exists
- return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
- }
-
- // Check sandbox tag restriction
- if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
- }
-
- var argsToken []*Token
- for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
- // Add token to args
- argsToken = append(argsToken, p.Current())
- p.Consume() // next token
- }
-
- // EOF?
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
- }
-
- p.Match(TokenSymbol, "%}")
-
- argParser := newParser(p.name, argsToken, p.template)
- if len(argsToken) == 0 {
- // This is done to have nice EOF error messages
- argParser.lastToken = tokenName
- }
-
- p.template.level++
- defer func() { p.template.level-- }()
- return tag.parser(p, tokenName, argParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go
deleted file mode 100644
index 590a1db3..00000000
--- a/vendor/github.com/flosch/pongo2/tags_autoescape.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagAutoescapeNode struct {
- wrapper *NodeWrapper
- autoescape bool
-}
-
-func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- old := ctx.Autoescape
- ctx.Autoescape = node.autoescape
-
- err := node.wrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- ctx.Autoescape = old
-
- return nil
-}
-
-func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- autoescapeNode := &tagAutoescapeNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endautoescape")
- if err != nil {
- return nil, err
- }
- autoescapeNode.wrapper = wrapper
-
- modeToken := arguments.MatchType(TokenIdentifier)
- if modeToken == nil {
- return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
- }
- if modeToken.Val == "on" {
- autoescapeNode.autoescape = true
- } else if modeToken.Val == "off" {
- autoescapeNode.autoescape = false
- } else {
- return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
- }
-
- return autoescapeNode, nil
-}
-
-func init() {
- RegisterTag("autoescape", tagAutoescapeParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go
deleted file mode 100644
index 86145f32..00000000
--- a/vendor/github.com/flosch/pongo2/tags_block.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagBlockNode struct {
- name string
-}
-
-func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
- nodeWrappers := make([]*NodeWrapper, 0)
- var t *NodeWrapper
-
- for tpl != nil {
- t = tpl.blocks[node.name]
- if t != nil {
- nodeWrappers = append(nodeWrappers, t)
- }
- tpl = tpl.child
- }
-
- return nodeWrappers
-}
-
-func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- tpl := ctx.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
-
- // Determine the block to execute
- blockWrappers := node.getBlockWrappers(tpl)
- lenBlockWrappers := len(blockWrappers)
-
- if lenBlockWrappers == 0 {
- return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
- }
-
- blockWrapper := blockWrappers[lenBlockWrappers-1]
- ctx.Private["block"] = tagBlockInformation{
- ctx: ctx,
- wrappers: blockWrappers[0 : lenBlockWrappers-1],
- }
- err := blockWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type tagBlockInformation struct {
- ctx *ExecutionContext
- wrappers []*NodeWrapper
-}
-
-func (t tagBlockInformation) Super() string {
- lenWrappers := len(t.wrappers)
-
- if lenWrappers == 0 {
- return ""
- }
-
- superCtx := NewChildExecutionContext(t.ctx)
- superCtx.Private["block"] = tagBlockInformation{
- ctx: t.ctx,
- wrappers: t.wrappers[0 : lenWrappers-1],
- }
-
- blockWrapper := t.wrappers[lenWrappers-1]
- buf := bytes.NewBufferString("")
- err := blockWrapper.Execute(superCtx, &templateWriter{buf})
- if err != nil {
- return ""
- }
- return buf.String()
-}
-
-func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
- }
-
- if arguments.Remaining() != 0 {
- return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
- }
-
- wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
- if err != nil {
- return nil, err
- }
- if endtagargs.Remaining() > 0 {
- endtagnameToken := endtagargs.MatchType(TokenIdentifier)
- if endtagnameToken != nil {
- if endtagnameToken.Val != nameToken.Val {
- return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
- nameToken.Val, endtagnameToken.Val), nil)
- }
- }
-
- if endtagnameToken == nil || endtagargs.Remaining() > 0 {
- return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
- }
- }
-
- tpl := doc.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
- _, hasBlock := tpl.blocks[nameToken.Val]
- if !hasBlock {
- tpl.blocks[nameToken.Val] = wrapper
- } else {
- return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
- }
-
- return &tagBlockNode{name: nameToken.Val}, nil
-}
-
-func init() {
- RegisterTag("block", tagBlockParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go
deleted file mode 100644
index 56a02ed9..00000000
--- a/vendor/github.com/flosch/pongo2/tags_comment.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package pongo2
-
-type tagCommentNode struct{}
-
-func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- commentNode := &tagCommentNode{}
-
- // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
- err := doc.SkipUntilTag("endcomment")
- if err != nil {
- return nil, err
- }
-
- if arguments.Count() != 0 {
- return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
- }
-
- return commentNode, nil
-}
-
-func init() {
- RegisterTag("comment", tagCommentParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go
deleted file mode 100644
index ffbd254e..00000000
--- a/vendor/github.com/flosch/pongo2/tags_cycle.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package pongo2
-
-type tagCycleValue struct {
- node *tagCycleNode
- value *Value
-}
-
-type tagCycleNode struct {
- position *Token
- args []IEvaluator
- idx int
- asName string
- silent bool
-}
-
-func (cv *tagCycleValue) String() string {
- return cv.value.String()
-}
-
-func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- item := node.args[node.idx%len(node.args)]
- node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if t, ok := val.Interface().(*tagCycleValue); ok {
- // {% cycle "test1" "test2"
- // {% cycle cycleitem %}
-
- // Update the cycle value with next value
- item := t.node.args[t.node.idx%len(t.node.args)]
- t.node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- t.value = val
-
- if !t.node.silent {
- writer.WriteString(val.String())
- }
- } else {
- // Regular call
-
- cycleValue := &tagCycleValue{
- node: node,
- value: val,
- }
-
- if node.asName != "" {
- ctx.Private[node.asName] = cycleValue
- }
- if !node.silent {
- writer.WriteString(val.String())
- }
- }
-
- return nil
-}
-
-// HINT: We're not supporting the old comma-separated list of expressions argument-style
-func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- cycleNode := &tagCycleNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- cycleNode.args = append(cycleNode.args, node)
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // as
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
- }
- cycleNode.asName = nameToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "silent") != nil {
- cycleNode.silent = true
- }
-
- // Now we're finished
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed cycle-tag.", nil)
- }
-
- return cycleNode, nil
-}
-
-func init() {
- RegisterTag("cycle", tagCycleParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go
deleted file mode 100644
index 5771020a..00000000
--- a/vendor/github.com/flosch/pongo2/tags_extends.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagExtendsNode struct {
- filename string
-}
-
-func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- extendsNode := &tagExtendsNode{}
-
- if doc.template.level > 1 {
- return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
- }
-
- if doc.template.parent != nil {
- // Already one parent
- return nil, arguments.Error("This template has already one parent.", start)
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // Get parent's filename
- parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- parentTemplate, err := doc.template.set.FromFile(parentFilename)
- if err != nil {
- return nil, err.(*Error)
- }
-
- // Keep track of things
- parentTemplate.child = doc.template
- doc.template.parent = parentTemplate
- extendsNode.filename = parentFilename
- } else {
- return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
- }
-
- return extendsNode, nil
-}
-
-func init() {
- RegisterTag("extends", tagExtendsParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go
deleted file mode 100644
index b38fd929..00000000
--- a/vendor/github.com/flosch/pongo2/tags_filter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type nodeFilterCall struct {
- name string
- paramExpr IEvaluator
-}
-
-type tagFilterNode struct {
- position *Token
- bodyWrapper *NodeWrapper
- filterChain []*nodeFilterCall
-}
-
-func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
-
- err := node.bodyWrapper.Execute(ctx, temp)
- if err != nil {
- return err
- }
-
- value := AsValue(temp.String())
-
- for _, call := range node.filterChain {
- var param *Value
- if call.paramExpr != nil {
- param, err = call.paramExpr.Evaluate(ctx)
- if err != nil {
- return err
- }
- } else {
- param = AsValue(nil)
- }
- value, err = ApplyFilter(call.name, value, param)
- if err != nil {
- return ctx.Error(err.Error(), node.position)
- }
- }
-
- writer.WriteString(value.String())
-
- return nil
-}
-
-func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- filterNode := &tagFilterNode{
- position: start,
- }
-
- wrapper, _, err := doc.WrapUntilTag("endfilter")
- if err != nil {
- return nil, err
- }
- filterNode.bodyWrapper = wrapper
-
- for arguments.Remaining() > 0 {
- filterCall := &nodeFilterCall{}
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected a filter name (identifier).", nil)
- }
- filterCall.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, ":") != nil {
- // Filter parameter
- // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
- expr, err := arguments.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filterCall.paramExpr = expr
- }
-
- filterNode.filterChain = append(filterNode.filterChain, filterCall)
-
- if arguments.MatchOne(TokenSymbol, "|") == nil {
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed filter-tag arguments.", nil)
- }
-
- return filterNode, nil
-}
-
-func init() {
- RegisterTag("filter", tagFilterParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go
deleted file mode 100644
index 5b2888e2..00000000
--- a/vendor/github.com/flosch/pongo2/tags_firstof.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package pongo2
-
-type tagFirstofNode struct {
- position *Token
- args []IEvaluator
-}
-
-func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, arg := range node.args {
- val, err := arg.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if val.IsTrue() {
- if ctx.Autoescape && !arg.FilterApplied("safe") {
- val, err = ApplyFilter("escape", val, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(val.String())
- return nil
- }
- }
-
- return nil
-}
-
-func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- firstofNode := &tagFirstofNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- firstofNode.args = append(firstofNode.args, node)
- }
-
- return firstofNode, nil
-}
-
-func init() {
- RegisterTag("firstof", tagFirstofParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_for.go b/vendor/github.com/flosch/pongo2/tags_for.go
deleted file mode 100644
index 5b0b5554..00000000
--- a/vendor/github.com/flosch/pongo2/tags_for.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package pongo2
-
-type tagForNode struct {
- key string
- value string // only for maps: for key, value in map
- objectEvaluator IEvaluator
- reversed bool
- sorted bool
-
- bodyWrapper *NodeWrapper
- emptyWrapper *NodeWrapper
-}
-
-type tagForLoopInformation struct {
- Counter int
- Counter0 int
- Revcounter int
- Revcounter0 int
- First bool
- Last bool
- Parentloop *tagForLoopInformation
-}
-
-func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
- // Backup forloop (as parentloop in public context), key-name and value-name
- forCtx := NewChildExecutionContext(ctx)
- parentloop := forCtx.Private["forloop"]
-
- // Create loop struct
- loopInfo := &tagForLoopInformation{
- First: true,
- }
-
- // Is it a loop in a loop?
- if parentloop != nil {
- loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
- }
-
- // Register loopInfo in public context
- forCtx.Private["forloop"] = loopInfo
-
- obj, err := node.objectEvaluator.Evaluate(forCtx)
- if err != nil {
- return err
- }
-
- obj.IterateOrder(func(idx, count int, key, value *Value) bool {
- // There's something to iterate over (correct type and at least 1 item)
-
- // Update loop infos and public context
- forCtx.Private[node.key] = key
- if value != nil {
- forCtx.Private[node.value] = value
- }
- loopInfo.Counter = idx + 1
- loopInfo.Counter0 = idx
- if idx == 1 {
- loopInfo.First = false
- }
- if idx+1 == count {
- loopInfo.Last = true
- }
- loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
- loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
-
- // Render elements with updated context
- err := node.bodyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- return false
- }
- return true
- }, func() {
- // Nothing to iterate over (maybe wrong type or no items)
- if node.emptyWrapper != nil {
- err := node.emptyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- }
- }
- }, node.reversed, node.sorted)
-
- return forError
-}
-
-func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- forNode := &tagForNode{}
-
- // Arguments parsing
- var valueToken *Token
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
- }
-
- if arguments.Match(TokenSymbol, ",") != nil {
- // Value name is provided
- valueToken = arguments.MatchType(TokenIdentifier)
- if valueToken == nil {
- return nil, arguments.Error("Value name must be an identifier.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "in") == nil {
- return nil, arguments.Error("Expected keyword 'in'.", nil)
- }
-
- objectEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- forNode.objectEvaluator = objectEvaluator
- forNode.key = keyToken.Val
- if valueToken != nil {
- forNode.value = valueToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
- forNode.reversed = true
- }
-
- if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
- forNode.sorted = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed for-loop arguments.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
- if err != nil {
- return nil, err
- }
- forNode.bodyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "empty" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endfor")
- if err != nil {
- return nil, err
- }
- forNode.emptyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return forNode, nil
-}
-
-func init() {
- RegisterTag("for", tagForParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go
deleted file mode 100644
index 3eeaf3b4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_if.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package pongo2
-
-type tagIfNode struct {
- conditions []IEvaluator
- wrappers []*NodeWrapper
-}
-
-func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for i, condition := range node.conditions {
- result, err := condition.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if result.IsTrue() {
- return node.wrappers[i].Execute(ctx, writer)
- }
- // Last condition?
- if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
- return node.wrappers[i+1].Execute(ctx, writer)
- }
- }
- return nil
-}
-
-func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifNode := &tagIfNode{}
-
- // Parse first and main IF condition
- condition, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("If-condition is malformed.", nil)
- }
-
- // Check the rest
- for {
- wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
- if err != nil {
- return nil, err
- }
- ifNode.wrappers = append(ifNode.wrappers, wrapper)
-
- if wrapper.Endtag == "elif" {
- // elif can take a condition
- condition, err = tagArgs.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if tagArgs.Remaining() > 0 {
- return nil, tagArgs.Error("Elif-condition is malformed.", nil)
- }
- } else {
- if tagArgs.Count() > 0 {
- // else/endif can't take any conditions
- return nil, tagArgs.Error("Arguments not allowed here.", nil)
- }
- }
-
- if wrapper.Endtag == "endif" {
- break
- }
- }
-
- return ifNode, nil
-}
-
-func init() {
- RegisterTag("if", tagIfParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
deleted file mode 100644
index 45296a0a..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifchanged.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type tagIfchangedNode struct {
- watchedExpr []IEvaluator
- lastValues []*Value
- lastContent []byte
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if len(node.watchedExpr) == 0 {
- // Check against own rendered body
-
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
- err := node.thenWrapper.Execute(ctx, buf)
- if err != nil {
- return err
- }
-
- bufBytes := buf.Bytes()
- if !bytes.Equal(node.lastContent, bufBytes) {
- // Rendered content changed, output it
- writer.Write(bufBytes)
- node.lastContent = bufBytes
- }
- } else {
- nowValues := make([]*Value, 0, len(node.watchedExpr))
- for _, expr := range node.watchedExpr {
- val, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- nowValues = append(nowValues, val)
- }
-
- // Compare old to new values now
- changed := len(node.lastValues) == 0
-
- for idx, oldVal := range node.lastValues {
- if !oldVal.EqualValueTo(nowValues[idx]) {
- changed = true
- break // we can stop here because ONE value changed
- }
- }
-
- node.lastValues = nowValues
-
- if changed {
- // Render thenWrapper
- err := node.thenWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- } else {
- // Render elseWrapper
- err := node.elseWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifchangedNode := &tagIfchangedNode{}
-
- for arguments.Remaining() > 0 {
- // Parse condition
- expr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifchangedNode, nil
-}
-
-func init() {
- RegisterTag("ifchanged", tagIfchangedParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go
deleted file mode 100644
index 103f1c7b..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifequalNode := &tagIfEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifequalNode.var1 = var1
- ifequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifequalNode, nil
-}
-
-func init() {
- RegisterTag("ifequal", tagIfEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
deleted file mode 100644
index 0d287d34..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfNotEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := !r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifnotequalNode := &tagIfNotEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifnotequalNode.var1 = var1
- ifnotequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifnotequalNode, nil
-}
-
-func init() {
- RegisterTag("ifnotequal", tagIfNotEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go
deleted file mode 100644
index 7e0d6a01..00000000
--- a/vendor/github.com/flosch/pongo2/tags_import.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-type tagImportNode struct {
- position *Token
- filename string
- macros map[string]*tagMacroNode // alias/name -> macro instance
-}
-
-func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for name, macro := range node.macros {
- func(name string, macro *tagMacroNode) {
- ctx.Private[name] = func(args ...*Value) *Value {
- return macro.call(ctx, args...)
- }
- }(name, macro)
- }
- return nil
-}
-
-func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- importNode := &tagImportNode{
- position: start,
- macros: make(map[string]*tagMacroNode),
- }
-
- filenameToken := arguments.MatchType(TokenString)
- if filenameToken == nil {
- return nil, arguments.Error("Import-tag needs a filename as string.", nil)
- }
-
- importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- if arguments.Remaining() == 0 {
- return nil, arguments.Error("You must at least specify one macro to import.", nil)
- }
-
- // Compile the given template
- tpl, err := doc.template.set.FromFile(importNode.filename)
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
- }
-
- for arguments.Remaining() > 0 {
- macroNameToken := arguments.MatchType(TokenIdentifier)
- if macroNameToken == nil {
- return nil, arguments.Error("Expected macro name (identifier).", nil)
- }
-
- asName := macroNameToken.Val
- if arguments.Match(TokenKeyword, "as") != nil {
- aliasToken := arguments.MatchType(TokenIdentifier)
- if aliasToken == nil {
- return nil, arguments.Error("Expected macro alias name (identifier).", nil)
- }
- asName = aliasToken.Val
- }
-
- macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
- if !has {
- return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
- importNode.filename), macroNameToken)
- }
-
- importNode.macros[asName] = macroInstance
-
- if arguments.Remaining() == 0 {
- break
- }
-
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ','.", nil)
- }
- }
-
- return importNode, nil
-}
-
-func init() {
- RegisterTag("import", tagImportParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go
deleted file mode 100644
index 6d619fda..00000000
--- a/vendor/github.com/flosch/pongo2/tags_include.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package pongo2
-
-type tagIncludeNode struct {
- tpl *Template
- filenameEvaluator IEvaluator
- lazy bool
- only bool
- filename string
- withPairs map[string]IEvaluator
- ifExists bool
-}
-
-func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Building the context for the template
- includeCtx := make(Context)
-
- // Fill the context with all data from the parent
- if !node.only {
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
- }
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- includeCtx[key] = val
- }
-
- // Execute the template
- if node.lazy {
- // Evaluate the filename
- filename, err := node.filenameEvaluator.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if filename.String() == "" {
- return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
- }
-
- // Get include-filename
- includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
-
- includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
- if err2 != nil {
- // if this is ReadFile error, and "if_exists" flag is enabled
- if node.ifExists && err2.(*Error).Sender == "fromfile" {
- return nil
- }
- return err2.(*Error)
- }
- err2 = includedTpl.ExecuteWriter(includeCtx, writer)
- if err2 != nil {
- return err2.(*Error)
- }
- return nil
- }
- // Template is already parsed with static filename
- err := node.tpl.ExecuteWriter(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- return nil
-}
-
-type tagIncludeEmptyNode struct{}
-
-func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- includeNode := &tagIncludeNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // "if_exists" flag
- ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
-
- // Get include-filename
- includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- includeNode.filename = includedFilename
- includedTpl, err := doc.template.set.FromFile(includedFilename)
- if err != nil {
- // if this is ReadFile error, and "if_exists" token presents we should create and empty node
- if err.(*Error).Sender == "fromfile" && ifExists {
- return &tagIncludeEmptyNode{}, nil
- }
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.tpl = includedTpl
- } else {
- // No String, then the user wants to use lazy-evaluation (slower, but possible)
- filenameEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.filenameEvaluator = filenameEvaluator
- includeNode.lazy = true
- includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
- }
-
- // After having parsed the filename we're gonna parse the with+only options
- if arguments.Match(TokenIdentifier, "with") != nil {
- for arguments.Remaining() > 0 {
- // We have at least one key=expr pair (because of starting "with")
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
- }
-
- includeNode.withPairs[keyToken.Val] = valueExpr
-
- // Only?
- if arguments.Match(TokenIdentifier, "only") != nil {
- includeNode.only = true
- break // stop parsing arguments because it's the last option
- }
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
- }
-
- return includeNode, nil
-}
-
-func init() {
- RegisterTag("include", tagIncludeParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go
deleted file mode 100644
index 7794f6c1..00000000
--- a/vendor/github.com/flosch/pongo2/tags_lorem.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math/rand"
- "strings"
- "time"
-)
-
-var (
- tagLoremParagraphs = strings.Split(tagLoremText, "\n")
- tagLoremWords = strings.Fields(tagLoremText)
-)
-
-type tagLoremNode struct {
- position *Token
- count int // number of paragraphs
- method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
- random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
-}
-
-func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- switch node.method {
- case "b":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- }
- }
- case "w":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[rand.Intn(len(tagLoremWords))]
- writer.WriteString(word)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[i%len(tagLoremWords)]
- writer.WriteString(word)
- }
- }
- case "p":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- writer.WriteString("
")
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- writer.WriteString("
")
-
- }
- }
- default:
- return ctx.OrigError(fmt.Errorf("unsupported method: %s", node.method), nil)
- }
-
- return nil
-}
-
-func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- loremNode := &tagLoremNode{
- position: start,
- count: 1,
- method: "b",
- }
-
- if countToken := arguments.MatchType(TokenNumber); countToken != nil {
- loremNode.count = AsValue(countToken.Val).Integer()
- }
-
- if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
- if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
- return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
- }
-
- loremNode.method = methodToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "random") != nil {
- loremNode.random = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
- }
-
- return loremNode, nil
-}
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterTag("lorem", tagLoremParser)
-}
-
-const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
-Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
-Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
-At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
-Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go
deleted file mode 100644
index dd3e0bf4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_macro.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagMacroNode struct {
- position *Token
- name string
- argsOrder []string
- args map[string]IEvaluator
- exported bool
-
- wrapper *NodeWrapper
-}
-
-func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- ctx.Private[node.name] = func(args ...*Value) *Value {
- return node.call(ctx, args...)
- }
-
- return nil
-}
-
-func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
- argsCtx := make(Context)
-
- for k, v := range node.args {
- if v == nil {
- // User did not provided a default value
- argsCtx[k] = nil
- } else {
- // Evaluate the default value
- valueExpr, err := v.Evaluate(ctx)
- if err != nil {
- ctx.Logf(err.Error())
- return AsSafeValue(err.Error())
- }
-
- argsCtx[k] = valueExpr
- }
- }
-
- if len(args) > len(node.argsOrder) {
- // Too many arguments, we're ignoring them and just logging into debug mode.
- err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
- node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
-
- ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
- return AsSafeValue(err.Error())
- }
-
- // Make a context for the macro execution
- macroCtx := NewChildExecutionContext(ctx)
-
- // Register all arguments in the private context
- macroCtx.Private.Update(argsCtx)
-
- for idx, argValue := range args {
- macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
- }
-
- var b bytes.Buffer
- err := node.wrapper.Execute(macroCtx, &b)
- if err != nil {
- return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
- }
-
- return AsSafeValue(b.String())
-}
-
-func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- macroNode := &tagMacroNode{
- position: start,
- args: make(map[string]IEvaluator),
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
- }
- macroNode.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, "(") == nil {
- return nil, arguments.Error("Expected '('.", nil)
- }
-
- for arguments.Match(TokenSymbol, ")") == nil {
- argNameToken := arguments.MatchType(TokenIdentifier)
- if argNameToken == nil {
- return nil, arguments.Error("Expected argument name as identifier.", nil)
- }
- macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
-
- if arguments.Match(TokenSymbol, "=") != nil {
- // Default expression follows
- argDefaultExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- macroNode.args[argNameToken.Val] = argDefaultExpr
- } else {
- // No default expression
- macroNode.args[argNameToken.Val] = nil
- }
-
- if arguments.Match(TokenSymbol, ")") != nil {
- break
- }
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ',' or ')'.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "export") != nil {
- macroNode.exported = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed macro-tag.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("endmacro")
- if err != nil {
- return nil, err
- }
- macroNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if macroNode.exported {
- // Now register the macro if it wants to be exported
- _, has := doc.template.exportedMacros[macroNode.name]
- if has {
- return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
- }
- doc.template.exportedMacros[macroNode.name] = macroNode
- }
-
- return macroNode, nil
-}
-
-func init() {
- RegisterTag("macro", tagMacroParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go
deleted file mode 100644
index d9fa4a37..00000000
--- a/vendor/github.com/flosch/pongo2/tags_now.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-import (
- "time"
-)
-
-type tagNowNode struct {
- position *Token
- format string
- fake bool
-}
-
-func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- var t time.Time
- if node.fake {
- t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
- } else {
- t = time.Now()
- }
-
- writer.WriteString(t.Format(node.format))
-
- return nil
-}
-
-func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- nowNode := &tagNowNode{
- position: start,
- }
-
- formatToken := arguments.MatchType(TokenString)
- if formatToken == nil {
- return nil, arguments.Error("Expected a format string.", nil)
- }
- nowNode.format = formatToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "fake") != nil {
- nowNode.fake = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed now-tag arguments.", nil)
- }
-
- return nowNode, nil
-}
-
-func init() {
- RegisterTag("now", tagNowParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go
deleted file mode 100644
index be121c12..00000000
--- a/vendor/github.com/flosch/pongo2/tags_set.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-type tagSetNode struct {
- name string
- expression IEvaluator
-}
-
-func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Evaluate expression
- value, err := node.expression.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- ctx.Private[node.name] = value
- return nil
-}
-
-func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- node := &tagSetNode{}
-
- // Parse variable name
- typeToken := arguments.MatchType(TokenIdentifier)
- if typeToken == nil {
- return nil, arguments.Error("Expected an identifier.", nil)
- }
- node.name = typeToken.Val
-
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
-
- // Variable expression
- keyExpression, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expression = keyExpression
-
- // Remaining arguments
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
- }
-
- return node, nil
-}
-
-func init() {
- RegisterTag("set", tagSetParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go
deleted file mode 100644
index 4fa851ba..00000000
--- a/vendor/github.com/flosch/pongo2/tags_spaceless.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "regexp"
-)
-
-type tagSpacelessNode struct {
- wrapper *NodeWrapper
-}
-
-var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
-
-func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
-
- err := node.wrapper.Execute(ctx, b)
- if err != nil {
- return err
- }
-
- s := b.String()
- // Repeat this recursively
- changed := true
- for changed {
- s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
- changed = s != s2
- s = s2
- }
-
- writer.WriteString(s)
-
- return nil
-}
-
-func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- spacelessNode := &tagSpacelessNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endspaceless")
- if err != nil {
- return nil, err
- }
- spacelessNode.wrapper = wrapper
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
- }
-
- return spacelessNode, nil
-}
-
-func init() {
- RegisterTag("spaceless", tagSpacelessParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go
deleted file mode 100644
index c33858d5..00000000
--- a/vendor/github.com/flosch/pongo2/tags_ssi.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package pongo2
-
-import (
- "io/ioutil"
-)
-
-type tagSSINode struct {
- filename string
- content string
- template *Template
-}
-
-func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if node.template != nil {
- // Execute the template within the current context
- includeCtx := make(Context)
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
-
- err := node.template.execute(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- } else {
- // Just print out the content
- writer.WriteString(node.content)
- }
- return nil
-}
-
-func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- SSINode := &tagSSINode{}
-
- if fileToken := arguments.MatchType(TokenString); fileToken != nil {
- SSINode.filename = fileToken.Val
-
- if arguments.Match(TokenIdentifier, "parsed") != nil {
- // parsed
- temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.template = temporaryTpl
- } else {
- // plaintext
- buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, (&Error{
- Sender: "tag:ssi",
- OrigError: err,
- }).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.content = string(buf)
- }
- } else {
- return nil, arguments.Error("First argument must be a string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed SSI-tag argument.", nil)
- }
-
- return SSINode, nil
-}
-
-func init() {
- RegisterTag("ssi", tagSSIParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go
deleted file mode 100644
index 164b4dc3..00000000
--- a/vendor/github.com/flosch/pongo2/tags_templatetag.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package pongo2
-
-type tagTemplateTagNode struct {
- content string
-}
-
-var templateTagMapping = map[string]string{
- "openblock": "{%",
- "closeblock": "%}",
- "openvariable": "{{",
- "closevariable": "}}",
- "openbrace": "{",
- "closebrace": "}",
- "opencomment": "{#",
- "closecomment": "#}",
-}
-
-func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- writer.WriteString(node.content)
- return nil
-}
-
-func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ttNode := &tagTemplateTagNode{}
-
- if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
- output, found := templateTagMapping[argToken.Val]
- if !found {
- return nil, arguments.Error("Argument not found", argToken)
- }
- ttNode.content = output
- } else {
- return nil, arguments.Error("Identifier expected.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
- }
-
- return ttNode, nil
-}
-
-func init() {
- RegisterTag("templatetag", tagTemplateTagParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go
deleted file mode 100644
index 70c9c3e8..00000000
--- a/vendor/github.com/flosch/pongo2/tags_widthratio.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type tagWidthratioNode struct {
- position *Token
- current, max IEvaluator
- width IEvaluator
- ctxName string
-}
-
-func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- current, err := node.current.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- max, err := node.max.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- width, err := node.width.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
-
- if node.ctxName == "" {
- writer.WriteString(fmt.Sprintf("%d", value))
- } else {
- ctx.Private[node.ctxName] = value
- }
-
- return nil
-}
-
-func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- widthratioNode := &tagWidthratioNode{
- position: start,
- }
-
- current, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.current = current
-
- max, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.max = max
-
- width, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.width = width
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // Name follows
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected name (identifier).", nil)
- }
- widthratioNode.ctxName = nameToken.Val
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
- }
-
- return widthratioNode, nil
-}
-
-func init() {
- RegisterTag("widthratio", tagWidthratioParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go
deleted file mode 100644
index 32b3c1c4..00000000
--- a/vendor/github.com/flosch/pongo2/tags_with.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pongo2
-
-type tagWithNode struct {
- withPairs map[string]IEvaluator
- wrapper *NodeWrapper
-}
-
-func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- //new context for block
- withctx := NewChildExecutionContext(ctx)
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- withctx.Private[key] = val
- }
-
- return node.wrapper.Execute(withctx, writer)
-}
-
-func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- withNode := &tagWithNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
- }
-
- wrapper, endargs, err := doc.WrapUntilTag("endwith")
- if err != nil {
- return nil, err
- }
- withNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- // Scan through all arguments to see which style the user uses (old or new style).
- // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
- oldStyle := false // by default we're using the new_style
- for i := 0; i < arguments.Count(); i++ {
- if arguments.PeekN(i, TokenKeyword, "as") != nil {
- oldStyle = true
- break
- }
- }
-
- for arguments.Remaining() > 0 {
- if oldStyle {
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- if arguments.Match(TokenKeyword, "as") == nil {
- return nil, arguments.Error("Expected 'as' keyword.", nil)
- }
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- } else {
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- }
- }
-
- return withNode, nil
-}
-
-func init() {
- RegisterTag("with", tagWithParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go
deleted file mode 100644
index 47666c94..00000000
--- a/vendor/github.com/flosch/pongo2/template.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-type TemplateWriter interface {
- io.Writer
- WriteString(string) (int, error)
-}
-
-type templateWriter struct {
- w io.Writer
-}
-
-func (tw *templateWriter) WriteString(s string) (int, error) {
- return tw.w.Write([]byte(s))
-}
-
-func (tw *templateWriter) Write(b []byte) (int, error) {
- return tw.w.Write(b)
-}
-
-type Template struct {
- set *TemplateSet
-
- // Input
- isTplString bool
- name string
- tpl string
- size int
-
- // Calculation
- tokens []*Token
- parser *Parser
-
- // first come, first serve (it's important to not override existing entries in here)
- level int
- parent *Template
- child *Template
- blocks map[string]*NodeWrapper
- exportedMacros map[string]*tagMacroNode
-
- // Output
- root *nodeDocument
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-}
-
-func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
- return newTemplate(set, "", true, tpl)
-}
-
-func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
- strTpl := string(tpl)
-
- // Create the template
- t := &Template{
- set: set,
- isTplString: isTplString,
- name: name,
- tpl: strTpl,
- size: len(strTpl),
- blocks: make(map[string]*NodeWrapper),
- exportedMacros: make(map[string]*tagMacroNode),
- Options: newOptions(),
- }
- // Copy all settings from another Options.
- t.Options.Update(set.Options)
-
- // Tokenize it
- tokens, err := lex(name, strTpl)
- if err != nil {
- return nil, err
- }
- t.tokens = tokens
-
- // For debugging purposes, show all tokens:
- /*for i, t := range tokens {
- fmt.Printf("%3d. %s\n", i, t)
- }*/
-
- // Parse it
- err = t.parse()
- if err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
- if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
- // Issue #94 https://github.com/flosch/pongo2/issues/94
- // If an application configures pongo2 template to trim_blocks,
- // the first newline after a template tag is removed automatically (like in PHP).
- prev := &Token{
- Typ: TokenHTML,
- Val: "\n",
- }
-
- for _, t := range tpl.tokens {
- if tpl.Options.LStripBlocks {
- if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
- prev.Val = strings.TrimRight(prev.Val, "\t ")
- }
- }
-
- if tpl.Options.TrimBlocks {
- if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
- if len(t.Val) > 0 && t.Val[0] == '\n' {
- t.Val = t.Val[1:len(t.Val)]
- }
- }
- }
-
- prev = t
- }
- }
-
- // Determine the parent to be executed (for template inheritance)
- parent := tpl
- for parent.parent != nil {
- parent = parent.parent
- }
-
- // Create context if none is given
- newContext := make(Context)
- newContext.Update(tpl.set.Globals)
-
- if context != nil {
- newContext.Update(context)
-
- if len(newContext) > 0 {
- // Check for context name syntax
- err := newContext.checkForValidIdentifiers()
- if err != nil {
- return parent, nil, err
- }
-
- // Check for clashes with macro names
- for k := range newContext {
- _, has := tpl.exportedMacros[k]
- if has {
- return parent, nil, &Error{
- Filename: tpl.name,
- Sender: "execution",
- OrigError: fmt.Errorf("context key name '%s' clashes with macro '%s'", k, k),
- }
- }
- }
- }
- }
-
- // Create operational context
- ctx := newExecutionContext(parent, newContext)
-
- return parent, ctx, nil
-}
-
-func (tpl *Template) execute(context Context, writer TemplateWriter) error {
- parent, ctx, err := tpl.newContextForExecution(context)
- if err != nil {
- return err
- }
-
- // Run the selected document
- if err := parent.root.Execute(ctx, writer); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
- return tpl.execute(context, &templateWriter{w: writer})
-}
-
-func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
- // Create output buffer
- // We assume that the rendered template will be 30% larger
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
- if err := tpl.execute(context, buffer); err != nil {
- return nil, err
- }
- return buffer, nil
-}
-
-// Executes the template with the given context and writes to writer (io.Writer)
-// on success. Context can be nil. Nothing is written on error; instead the error
-// is being returned.
-func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
- buf, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return err
- }
- _, err = buf.WriteTo(writer)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Same as ExecuteWriter. The only difference between both functions is that
-// this function might already have written parts of the generated template in the
-// case of an execution error because there's no intermediate buffer involved for
-// performance reasons. This is handy if you need high performance template
-// generation or if you want to manage your own pool of buffers.
-func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
- return tpl.newTemplateWriterAndExecute(context, writer)
-}
-
-// Executes the template and returns the rendered template as a []byte
-func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return nil, err
- }
- return buffer.Bytes(), nil
-}
-
-// Executes the template and returns the rendered template as a string
-func (tpl *Template) Execute(context Context) (string, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return "", err
- }
-
- return buffer.String(), nil
-
-}
-
-func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
- var parents []*Template
- result := make(map[string]string)
-
- parent := tpl
- for parent != nil {
- parents = append(parents, parent)
- parent = parent.parent
- }
-
- for _, t := range parents {
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
- _, ctx, err := t.newContextForExecution(context)
- if err != nil {
- return nil, err
- }
- for _, blockName := range blocks {
- if _, ok := result[blockName]; ok {
- continue
- }
- if blockWrapper, ok := t.blocks[blockName]; ok {
- bErr := blockWrapper.Execute(ctx, buffer)
- if bErr != nil {
- return nil, bErr
- }
- result[blockName] = buffer.String()
- buffer.Reset()
- }
- }
- // We have found all blocks
- if len(blocks) == len(result) {
- break
- }
- }
-
- return result, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go
deleted file mode 100644
index abd23409..00000000
--- a/vendor/github.com/flosch/pongo2/template_loader.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
-)
-
-// LocalFilesystemLoader represents a local filesystem loader with basic
-// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
-type LocalFilesystemLoader struct {
- baseDir string
-}
-
-// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
-// and panics if there's any error during instantiation. The parameters
-// are the same like NewLocalFileSystemLoader.
-func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- log.Panic(err)
- }
- return fs
-}
-
-// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
-// templatesto be loaded from disk (unrestricted). If any base directory
-// is given (or being set using SetBaseDir), this base directory is being used
-// for path calculation in template inclusions/imports. Otherwise the path
-// is calculated based relatively to the including template's path.
-func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
- fs := &LocalFilesystemLoader{}
- if baseDir != "" {
- if err := fs.SetBaseDir(baseDir); err != nil {
- return nil, err
- }
- }
- return fs, nil
-}
-
-// SetBaseDir sets the template's base directory. This directory will
-// be used for any relative path in filters, tags and From*-functions to determine
-// your template. See the comment for NewLocalFileSystemLoader as well.
-func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
- // Make the path absolute
- if !filepath.IsAbs(path) {
- abs, err := filepath.Abs(path)
- if err != nil {
- return err
- }
- path = abs
- }
-
- // Check for existence
- fi, err := os.Stat(path)
- if err != nil {
- return err
- }
- if !fi.IsDir() {
- return fmt.Errorf("The given path '%s' is not a directory.", path)
- }
-
- fs.baseDir = path
- return nil
-}
-
-// Get reads the path's content from your local filesystem.
-func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
- buf, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return bytes.NewReader(buf), nil
-}
-
-// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
-// When there's no base dir set, the absolute path to the filename
-// will be calculated based on either the provided base directory (which
-// might be a path of a template which includes another template) or
-// the current working directory.
-func (fs *LocalFilesystemLoader) Abs(base, name string) string {
- if filepath.IsAbs(name) {
- return name
- }
-
- // Our own base dir has always priority; if there's none
- // we use the path provided in base.
- var err error
- if fs.baseDir == "" {
- if base == "" {
- base, err = os.Getwd()
- if err != nil {
- panic(err)
- }
- return filepath.Join(base, name)
- }
-
- return filepath.Join(filepath.Dir(base), name)
- }
-
- return filepath.Join(fs.baseDir, name)
-}
-
-// SandboxedFilesystemLoader is still WIP.
-type SandboxedFilesystemLoader struct {
- *LocalFilesystemLoader
-}
-
-// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
-func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- return nil, err
- }
- return &SandboxedFilesystemLoader{
- LocalFilesystemLoader: fs,
- }, nil
-}
-
-// Move sandbox to a virtual fs
-
-/*
-if len(set.SandboxDirectories) > 0 {
- defer func() {
- // Remove any ".." or other crap
- resolvedPath = filepath.Clean(resolvedPath)
-
- // Make the path absolute
- absPath, err := filepath.Abs(resolvedPath)
- if err != nil {
- panic(err)
- }
- resolvedPath = absPath
-
- // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
- for _, pattern := range set.SandboxDirectories {
- matched, err := filepath.Match(pattern, resolvedPath)
- if err != nil {
- panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
- }
- if matched {
- // OK!
- return
- }
- }
-
- // No pattern matched, we have to log+deny the request
- set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
- resolvedPath = ""
- }()
-}
-*/
diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go
deleted file mode 100644
index 4b1e43da..00000000
--- a/vendor/github.com/flosch/pongo2/template_sets.go
+++ /dev/null
@@ -1,305 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "sync"
-
- "errors"
-)
-
-// TemplateLoader allows to implement a virtual file system.
-type TemplateLoader interface {
- // Abs calculates the path to a given template. Whenever a path must be resolved
- // due to an import from another template, the base equals the parent template's path.
- Abs(base, name string) string
-
- // Get returns an io.Reader where the template's content can be read from.
- Get(path string) (io.Reader, error)
-}
-
-// TemplateSet allows you to create your own group of templates with their own
-// global context (which is shared among all members of the set) and their own
-// configuration.
-// It's useful for a separation of different kind of templates
-// (e. g. web templates vs. mail templates).
-type TemplateSet struct {
- name string
- loaders []TemplateLoader
-
- // Globals will be provided to all templates created within this template set
- Globals Context
-
- // If debug is true (default false), ExecutionContext.Logf() will work and output
- // to STDOUT. Furthermore, FromCache() won't cache the templates.
- // Make sure to synchronize the access to it in case you're changing this
- // variable during program execution (and template compilation/execution).
- Debug bool
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-
- // Sandbox features
- // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
- //
- // For efficiency reasons you can ban tags/filters only *before* you have
- // added your first template to the set (restrictions are statically checked).
- // After you added one, it's not possible anymore (for your personal security).
- firstTemplateCreated bool
- bannedTags map[string]bool
- bannedFilters map[string]bool
-
- // Template cache (for FromCache())
- templateCache map[string]*Template
- templateCacheMutex sync.Mutex
-}
-
-// NewSet can be used to create sets with different kind of templates
-// (e. g. web from mail templates), with different globals or
-// other configurations.
-func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
- if len(loaders) == 0 {
- panic(fmt.Errorf("at least one template loader must be specified"))
- }
-
- return &TemplateSet{
- name: name,
- loaders: loaders,
- Globals: make(Context),
- bannedTags: make(map[string]bool),
- bannedFilters: make(map[string]bool),
- templateCache: make(map[string]*Template),
- Options: newOptions(),
- }
-}
-
-func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
- set.loaders = append(set.loaders, loaders...)
-}
-
-func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
- return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
-}
-
-func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
- name := ""
- if tpl != nil && tpl.isTplString {
- return path
- }
- if tpl != nil {
- name = tpl.name
- }
-
- return loader.Abs(name, path)
-}
-
-// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanTag(name string) error {
- _, has := tags[name]
- if !has {
- return fmt.Errorf("tag '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any tags after you've added your first template to your template set")
- }
- _, has = set.bannedTags[name]
- if has {
- return fmt.Errorf("tag '%s' is already banned", name)
- }
- set.bannedTags[name] = true
-
- return nil
-}
-
-// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanFilter(name string) error {
- _, has := filters[name]
- if !has {
- return fmt.Errorf("filter '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any filters after you've added your first template to your template set")
- }
- _, has = set.bannedFilters[name]
- if has {
- return fmt.Errorf("filter '%s' is already banned", name)
- }
- set.bannedFilters[name] = true
-
- return nil
-}
-
-func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
- // iterate over loaders until we appear to have a valid template
- for _, loader = range set.loaders {
- name = set.resolveFilenameForLoader(loader, tpl, path)
- fd, err = loader.Get(name)
- if err == nil {
- return
- }
- }
-
- return path, nil, nil, fmt.Errorf("unable to resolve template")
-}
-
-// CleanCache cleans the template cache. If filenames is not empty,
-// it will remove the template caches of those filenames.
-// Or it will empty the whole template cache. It is thread-safe.
-func (set *TemplateSet) CleanCache(filenames ...string) {
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- if len(filenames) == 0 {
- set.templateCache = make(map[string]*Template, len(set.templateCache))
- }
-
- for _, filename := range filenames {
- delete(set.templateCache, set.resolveFilename(nil, filename))
- }
-}
-
-// FromCache is a convenient method to cache templates. It is thread-safe
-// and will only compile the template associated with a filename once.
-// If TemplateSet.Debug is true (for example during development phase),
-// FromCache() will not cache the template and instead recompile it on any
-// call (to make changes to a template live instantaneously).
-func (set *TemplateSet) FromCache(filename string) (*Template, error) {
- if set.Debug {
- // Recompile on any request
- return set.FromFile(filename)
- }
- // Cache the template
- cleanedFilename := set.resolveFilename(nil, filename)
-
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- tpl, has := set.templateCache[cleanedFilename]
-
- // Cache miss
- if !has {
- tpl, err := set.FromFile(cleanedFilename)
- if err != nil {
- return nil, err
- }
- set.templateCache[cleanedFilename] = tpl
- return tpl, nil
- }
-
- // Cache hit
- return tpl, nil
-}
-
-// FromString loads a template from string and returns a Template instance.
-func (set *TemplateSet) FromString(tpl string) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, []byte(tpl))
-}
-
-// FromBytes loads a template from bytes and returns a Template instance.
-func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, tpl)
-}
-
-// FromFile loads a template from a filename and returns a Template instance.
-func (set *TemplateSet) FromFile(filename string) (*Template, error) {
- set.firstTemplateCreated = true
-
- _, _, fd, err := set.resolveTemplate(nil, filename)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
- buf, err := ioutil.ReadAll(fd)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
-
- return newTemplate(set, filename, false, buf)
-}
-
-// RenderTemplateString is a shortcut and renders a template string directly.
-func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromString(s))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateBytes is a shortcut and renders template bytes directly.
-func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromBytes(b))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateFile is a shortcut and renders a template file directly.
-func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromFile(fn))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-func (set *TemplateSet) logf(format string, args ...interface{}) {
- if set.Debug {
- logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
- }
-}
-
-// Logging function (internally used)
-func logf(format string, items ...interface{}) {
- if debug {
- logger.Printf(format, items...)
- }
-}
-
-var (
- debug bool // internal debugging
- logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
-
- // DefaultLoader allows the default un-sandboxed access to the local file
- // system and is being used by the DefaultSet.
- DefaultLoader = MustNewLocalFileSystemLoader("")
-
- // DefaultSet is a set created for you for convinience reasons.
- DefaultSet = NewSet("default", DefaultLoader)
-
- // Methods on the default set
- FromString = DefaultSet.FromString
- FromBytes = DefaultSet.FromBytes
- FromFile = DefaultSet.FromFile
- FromCache = DefaultSet.FromCache
- RenderTemplateString = DefaultSet.RenderTemplateString
- RenderTemplateFile = DefaultSet.RenderTemplateFile
-
- // Globals for the default set
- Globals = DefaultSet.Globals
-)
diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go
deleted file mode 100644
index 8b49adb7..00000000
--- a/vendor/github.com/flosch/pongo2/value.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type Value struct {
- val reflect.Value
- safe bool // used to indicate whether a Value needs explicit escaping in the template
-}
-
-// AsValue converts any given value to a pongo2.Value
-// Usually being used within own functions passed to a template
-// through a Context or within filter functions.
-//
-// Example:
-// AsValue("my string")
-func AsValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- }
-}
-
-// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
-func AsSafeValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- safe: true,
- }
-}
-
-func (v *Value) getResolvedValue() reflect.Value {
- if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
- return v.val.Elem()
- }
- return v.val
-}
-
-// IsString checks whether the underlying value is a string
-func (v *Value) IsString() bool {
- return v.getResolvedValue().Kind() == reflect.String
-}
-
-// IsBool checks whether the underlying value is a bool
-func (v *Value) IsBool() bool {
- return v.getResolvedValue().Kind() == reflect.Bool
-}
-
-// IsFloat checks whether the underlying value is a float
-func (v *Value) IsFloat() bool {
- return v.getResolvedValue().Kind() == reflect.Float32 ||
- v.getResolvedValue().Kind() == reflect.Float64
-}
-
-// IsInteger checks whether the underlying value is an integer
-func (v *Value) IsInteger() bool {
- return v.getResolvedValue().Kind() == reflect.Int ||
- v.getResolvedValue().Kind() == reflect.Int8 ||
- v.getResolvedValue().Kind() == reflect.Int16 ||
- v.getResolvedValue().Kind() == reflect.Int32 ||
- v.getResolvedValue().Kind() == reflect.Int64 ||
- v.getResolvedValue().Kind() == reflect.Uint ||
- v.getResolvedValue().Kind() == reflect.Uint8 ||
- v.getResolvedValue().Kind() == reflect.Uint16 ||
- v.getResolvedValue().Kind() == reflect.Uint32 ||
- v.getResolvedValue().Kind() == reflect.Uint64
-}
-
-// IsNumber checks whether the underlying value is either an integer
-// or a float.
-func (v *Value) IsNumber() bool {
- return v.IsInteger() || v.IsFloat()
-}
-
-// IsTime checks whether the underlying value is a time.Time.
-func (v *Value) IsTime() bool {
- _, ok := v.Interface().(time.Time)
- return ok
-}
-
-// IsNil checks whether the underlying value is NIL
-func (v *Value) IsNil() bool {
- //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
- return !v.getResolvedValue().IsValid()
-}
-
-// String returns a string for the underlying value. If this value is not
-// of type string, pongo2 tries to convert it. Currently the following
-// types for underlying values are supported:
-//
-// 1. string
-// 2. int/uint (any size)
-// 3. float (any precision)
-// 4. bool
-// 5. time.Time
-// 6. String() will be called on the underlying value if provided
-//
-// NIL values will lead to an empty string. Unsupported types are leading
-// to their respective type name.
-func (v *Value) String() string {
- if v.IsNil() {
- return ""
- }
-
- switch v.getResolvedValue().Kind() {
- case reflect.String:
- return v.getResolvedValue().String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(v.getResolvedValue().Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
- case reflect.Float32, reflect.Float64:
- return fmt.Sprintf("%f", v.getResolvedValue().Float())
- case reflect.Bool:
- if v.Bool() {
- return "True"
- }
- return "False"
- case reflect.Struct:
- if t, ok := v.Interface().(fmt.Stringer); ok {
- return t.String()
- }
- }
-
- logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
- return v.getResolvedValue().String()
-}
-
-// Integer returns the underlying value as an integer (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.
-func (v *Value) Integer() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return int(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return int(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return int(v.getResolvedValue().Float())
- case reflect.String:
- // Try to convert from string to int (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0
- }
- return int(f)
- default:
- logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Float returns the underlying value as a float (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.0.
-func (v *Value) Float() float64 {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return float64(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float()
- case reflect.String:
- // Try to convert from string to float64 (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0.0
- }
- return f
- default:
- logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0.0
- }
-}
-
-// Bool returns the underlying value as bool. If the value is not bool, false
-// will always be returned. If you're looking for true/false-evaluation of the
-// underlying value, have a look on the IsTrue()-function.
-func (v *Value) Bool() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- default:
- logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Time returns the underlying value as time.Time.
-// If the underlying value is not a time.Time, it returns the zero value of time.Time.
-func (v *Value) Time() time.Time {
- tm, ok := v.Interface().(time.Time)
- if ok {
- return tm
- }
- return time.Time{}
-}
-
-// IsTrue tries to evaluate the underlying value the Pythonic-way:
-//
-// Returns TRUE in one the following cases:
-//
-// * int != 0
-// * uint != 0
-// * float != 0.0
-// * len(array/chan/map/slice/string) > 0
-// * bool == true
-// * underlying value is a struct
-//
-// Otherwise returns always FALSE.
-func (v *Value) IsTrue() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.getResolvedValue().Int() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return v.getResolvedValue().Uint() != 0
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float() != 0
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.getResolvedValue().Len() > 0
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- case reflect.Struct:
- return true // struct instance is always true
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Negate tries to negate the underlying value. It's mainly used for
-// the NOT-operator and in conjunction with a call to
-// return_value.IsTrue() afterwards.
-//
-// Example:
-// AsValue(1).Negate().IsTrue() == false
-func (v *Value) Negate() *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if v.Integer() != 0 {
- return AsValue(0)
- }
- return AsValue(1)
- case reflect.Float32, reflect.Float64:
- if v.Float() != 0.0 {
- return AsValue(float64(0.0))
- }
- return AsValue(float64(1.1))
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return AsValue(v.getResolvedValue().Len() == 0)
- case reflect.Bool:
- return AsValue(!v.getResolvedValue().Bool())
- case reflect.Struct:
- return AsValue(false)
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue(true)
- }
-}
-
-// Len returns the length for an array, chan, map, slice or string.
-// Otherwise it will return 0.
-func (v *Value) Len() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
- return v.getResolvedValue().Len()
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return len(runes)
- default:
- logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Slice slices an array, slice or string. Otherwise it will
-// return an empty []int.
-func (v *Value) Slice(i, j int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- return AsValue(v.getResolvedValue().Slice(i, j).Interface())
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return AsValue(string(runes[i:j]))
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Index gets the i-th item of an array, slice or string. Otherwise
-// it will return NIL.
-func (v *Value) Index(i int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- if i >= v.Len() {
- return AsValue(nil)
- }
- return AsValue(v.getResolvedValue().Index(i).Interface())
- case reflect.String:
- //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
- s := v.getResolvedValue().String()
- runes := []rune(s)
- if i < len(runes) {
- return AsValue(string(runes[i]))
- }
- return AsValue("")
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Contains checks whether the underlying value (which must be of type struct, map,
-// string, array or slice) contains of another Value (e. g. used to check
-// whether a struct contains of a specific field or a map contains a specific key).
-//
-// Example:
-// AsValue("Hello, World!").Contains(AsValue("World")) == true
-func (v *Value) Contains(other *Value) bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Struct:
- fieldValue := v.getResolvedValue().FieldByName(other.String())
- return fieldValue.IsValid()
- case reflect.Map:
- var mapValue reflect.Value
- switch other.Interface().(type) {
- case int:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- case string:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- default:
- logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
- return false
- }
-
- return mapValue.IsValid()
- case reflect.String:
- return strings.Contains(v.getResolvedValue().String(), other.String())
-
- case reflect.Slice, reflect.Array:
- for i := 0; i < v.getResolvedValue().Len(); i++ {
- item := v.getResolvedValue().Index(i)
- if other.Interface() == item.Interface() {
- return true
- }
- }
- return false
-
- default:
- logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// CanSlice checks whether the underlying value is of type array, slice or string.
-// You normally would use CanSlice() before using the Slice() operation.
-func (v *Value) CanSlice() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- return true
- }
- return false
-}
-
-// Iterate iterates over a map, array, slice or a string. It calls the
-// function's first argument for every value with the following arguments:
-//
-// idx current 0-index
-// count total amount of items
-// key *Value for the key or item
-// value *Value (only for maps, the respective value for a specific key)
-//
-// If the underlying value has no items or is not one of the types above,
-// the empty function (function's second argument) will be called.
-func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
- v.IterateOrder(fn, empty, false, false)
-}
-
-// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
-// not affect the iteration through a map because maps don't have any particular order.
-// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
-func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
- switch v.getResolvedValue().Kind() {
- case reflect.Map:
- keys := sortedKeys(v.getResolvedValue().MapKeys())
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(keys))
- } else {
- sort.Sort(keys)
- }
- }
- keyLen := len(keys)
- for idx, key := range keys {
- value := v.getResolvedValue().MapIndex(key)
- if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
- return
- }
- }
- if keyLen == 0 {
- empty()
- }
- return // done
- case reflect.Array, reflect.Slice:
- var items valuesList
-
- itemCount := v.getResolvedValue().Len()
- for i := 0; i < itemCount; i++ {
- items = append(items, &Value{val: v.getResolvedValue().Index(i)})
- }
-
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(items))
- } else {
- sort.Sort(items)
- }
- } else {
- if reverse {
- for i := 0; i < itemCount/2; i++ {
- items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
- }
- }
- }
-
- if len(items) > 0 {
- for idx, item := range items {
- if !fn(idx, itemCount, item, nil) {
- return
- }
- }
- } else {
- empty()
- }
- return // done
- case reflect.String:
- if sorted {
- // TODO(flosch): Handle sorted
- panic("TODO: handle sort for type string")
- }
-
- // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
- charCount := v.getResolvedValue().Len()
- if charCount > 0 {
- if reverse {
- for i := charCount - 1; i >= 0; i-- {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- } else {
- for i := 0; i < charCount; i++ {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- }
- } else {
- empty()
- }
- return // done
- default:
- logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
- }
- empty()
-}
-
-// Interface gives you access to the underlying value.
-func (v *Value) Interface() interface{} {
- if v.val.IsValid() {
- return v.val.Interface()
- }
- return nil
-}
-
-// EqualValueTo checks whether two values are containing the same value or object.
-func (v *Value) EqualValueTo(other *Value) bool {
- // comparison of uint with int fails using .Interface()-comparison (see issue #64)
- if v.IsInteger() && other.IsInteger() {
- return v.Integer() == other.Integer()
- }
- if v.IsTime() && other.IsTime() {
- return v.Time().Equal(other.Time())
- }
- return v.Interface() == other.Interface()
-}
-
-type sortedKeys []reflect.Value
-
-func (sk sortedKeys) Len() int {
- return len(sk)
-}
-
-func (sk sortedKeys) Less(i, j int) bool {
- vi := &Value{val: sk[i]}
- vj := &Value{val: sk[j]}
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (sk sortedKeys) Swap(i, j int) {
- sk[i], sk[j] = sk[j], sk[i]
-}
-
-type valuesList []*Value
-
-func (vl valuesList) Len() int {
- return len(vl)
-}
-
-func (vl valuesList) Less(i, j int) bool {
- vi := vl[i]
- vj := vl[j]
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (vl valuesList) Swap(i, j int) {
- vl[i], vl[j] = vl[j], vl[i]
-}
diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go
deleted file mode 100644
index 25e2af40..00000000
--- a/vendor/github.com/flosch/pongo2/variable.go
+++ /dev/null
@@ -1,693 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-const (
- varTypeInt = iota
- varTypeIdent
-)
-
-var (
- typeOfValuePtr = reflect.TypeOf(new(Value))
- typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
-)
-
-type variablePart struct {
- typ int
- s string
- i int
-
- isFunctionCall bool
- callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
-}
-
-type functionCallArgument interface {
- Evaluate(*ExecutionContext) (*Value, *Error)
-}
-
-// TODO: Add location tokens
-type stringResolver struct {
- locationToken *Token
- val string
-}
-
-type intResolver struct {
- locationToken *Token
- val int
-}
-
-type floatResolver struct {
- locationToken *Token
- val float64
-}
-
-type boolResolver struct {
- locationToken *Token
- val bool
-}
-
-type variableResolver struct {
- locationToken *Token
-
- parts []*variablePart
-}
-
-type nodeFilteredVariable struct {
- locationToken *Token
-
- resolver IEvaluator
- filterChain []*filterCall
-}
-
-type nodeVariable struct {
- locationToken *Token
- expr IEvaluator
-}
-
-type executionCtxEval struct{}
-
-func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := v.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := vr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := s.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := i.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := f.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := b.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (v *nodeFilteredVariable) GetPositionToken() *Token {
- return v.locationToken
-}
-
-func (vr *variableResolver) GetPositionToken() *Token {
- return vr.locationToken
-}
-
-func (s *stringResolver) GetPositionToken() *Token {
- return s.locationToken
-}
-
-func (i *intResolver) GetPositionToken() *Token {
- return i.locationToken
-}
-
-func (f *floatResolver) GetPositionToken() *Token {
- return f.locationToken
-}
-
-func (b *boolResolver) GetPositionToken() *Token {
- return b.locationToken
-}
-
-func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(s.val), nil
-}
-
-func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(i.val), nil
-}
-
-func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(f.val), nil
-}
-
-func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(b.val), nil
-}
-
-func (s *stringResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (i *intResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (f *floatResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (b *boolResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (nv *nodeVariable) FilterApplied(name string) bool {
- return nv.expr.FilterApplied(name)
-}
-
-func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := nv.expr.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
- // apply escape filter
- value, err = filters["escape"](value, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(value.String())
- return nil
-}
-
-func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(ctx), nil
-}
-
-func (vr *variableResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (vr *variableResolver) String() string {
- parts := make([]string, 0, len(vr.parts))
- for _, p := range vr.parts {
- switch p.typ {
- case varTypeInt:
- parts = append(parts, strconv.Itoa(p.i))
- case varTypeIdent:
- parts = append(parts, p.s)
- default:
- panic("unimplemented")
- }
- }
- return strings.Join(parts, ".")
-}
-
-func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
- var current reflect.Value
- var isSafe bool
-
- for idx, part := range vr.parts {
- if idx == 0 {
- // We're looking up the first part of the variable.
- // First we're having a look in our private
- // context (e. g. information provided by tags, like the forloop)
- val, inPrivate := ctx.Private[vr.parts[0].s]
- if !inPrivate {
- // Nothing found? Then have a final lookup in the public context
- val = ctx.Public[vr.parts[0].s]
- }
- current = reflect.ValueOf(val) // Get the initial value
- } else {
- // Next parts, resolve it from current
-
- // Before resolving the pointer, let's see if we have a method to call
- // Problem with resolving the pointer is we're changing the receiver
- isFunc := false
- if part.typ == varTypeIdent {
- funcValue := current.MethodByName(part.s)
- if funcValue.IsValid() {
- current = funcValue
- isFunc = true
- }
- }
-
- if !isFunc {
- // If current a pointer, resolve it
- if current.Kind() == reflect.Ptr {
- current = current.Elem()
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
- }
-
- // Look up which part must be called now
- switch part.typ {
- case varTypeInt:
- // Calling an index is only possible for:
- // * slices/arrays/strings
- switch current.Kind() {
- case reflect.String, reflect.Array, reflect.Slice:
- if part.i >= 0 && current.Len() > part.i {
- current = current.Index(part.i)
- } else {
- // In Django, exceeding the length of a list is just empty.
- return AsValue(nil), nil
- }
- default:
- return nil, fmt.Errorf("Can't access an index on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- case varTypeIdent:
- // debugging:
- // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
-
- // Calling a field or key
- switch current.Kind() {
- case reflect.Struct:
- current = current.FieldByName(part.s)
- case reflect.Map:
- current = current.MapIndex(reflect.ValueOf(part.s))
- default:
- return nil, fmt.Errorf("Can't access a field by name on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- default:
- panic("unimplemented")
- }
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
-
- // If current is a reflect.ValueOf(pongo2.Value), then unpack it
- // Happens in function calls (as a return value) or by injecting
- // into the execution context (e.g. in a for-loop)
- if current.Type() == typeOfValuePtr {
- tmpValue := current.Interface().(*Value)
- current = tmpValue.val
- isSafe = tmpValue.safe
- }
-
- // Check whether this is an interface and resolve it where required
- if current.Kind() == reflect.Interface {
- current = reflect.ValueOf(current.Interface())
- }
-
- // Check if the part is a function call
- if part.isFunctionCall || current.Kind() == reflect.Func {
- // Check for callable
- if current.Kind() != reflect.Func {
- return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
- }
-
- // Check for correct function syntax and types
- // func(*Value, ...) *Value
- t := current.Type()
- currArgs := part.callingArgs
-
- // If an implicit ExecCtx is needed
- if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
- currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
- }
-
- // Input arguments
- if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
- return nil,
- fmt.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
- t.NumIn(), vr.String(), len(currArgs))
- }
-
- // Output arguments
- if t.NumOut() != 1 && t.NumOut() != 2 {
- return nil, fmt.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
- }
-
- // Evaluate all parameters
- var parameters []reflect.Value
-
- numArgs := t.NumIn()
- isVariadic := t.IsVariadic()
- var fnArg reflect.Type
-
- for idx, arg := range currArgs {
- pv, err := arg.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- if isVariadic {
- if idx >= t.NumIn()-1 {
- fnArg = t.In(numArgs - 1).Elem()
- } else {
- fnArg = t.In(idx)
- }
- } else {
- fnArg = t.In(idx)
- }
-
- if fnArg != typeOfValuePtr {
- // Function's argument is not a *pongo2.Value, then we have to check whether input argument is of the same type as the function's argument
- if !isVariadic {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
- idx, vr.String(), fnArg.String(), pv.Interface())
- }
- // Function's argument has another type, using the interface-value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- } else {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
- vr.String(), fnArg.String(), pv.Interface())
- }
- // Function's argument has another type, using the interface-value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- }
- } else {
- // Function's argument is a *pongo2.Value
- parameters = append(parameters, reflect.ValueOf(pv))
- }
- }
-
- // Check if any of the values are invalid
- for _, p := range parameters {
- if p.Kind() == reflect.Invalid {
- return nil, fmt.Errorf("Calling a function using an invalid parameter")
- }
- }
-
- // Call it and get first return parameter back
- values := current.Call(parameters)
- rv := values[0]
- if t.NumOut() == 2 {
- e := values[1].Interface()
- if e != nil {
- err, ok := e.(error)
- if !ok {
- return nil, fmt.Errorf("The second return value is not an error")
- }
- if err != nil {
- return nil, err
- }
- }
- }
-
- if rv.Type() != typeOfValuePtr {
- current = reflect.ValueOf(rv.Interface())
- } else {
- // Return the function call value
- current = rv.Interface().(*Value).val
- isSafe = rv.Interface().(*Value).safe
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (e. g. NIL value)
- return AsValue(nil), nil
- }
- }
-
- return &Value{val: current, safe: isSafe}, nil
-}
-
-func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := vr.resolve(ctx)
- if err != nil {
- return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
- }
- return value, nil
-}
-
-func (v *nodeFilteredVariable) FilterApplied(name string) bool {
- for _, filter := range v.filterChain {
- if filter.name == name {
- return true
- }
- }
- return false
-}
-
-func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := v.resolver.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- for _, filter := range v.filterChain {
- value, err = filter.Execute(value, ctx)
- if err != nil {
- return nil, err
- }
- }
-
- return value, nil
-}
-
-// IDENT | IDENT.(IDENT|NUMBER)...
-func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
- t := p.Current()
-
- if t == nil {
- return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
- }
-
- // Is first part a number or a string, there's nothing to resolve (because there's only to return the value then)
- switch t.Typ {
- case TokenNumber:
- p.Consume()
-
- // One exception to the rule that we don't have float64 literals is at the beginning
- // of an expression (or a variable name). Since we know we started with an integer
- // which can't obviously be a variable name, we can check whether the first number
- // is followed by dot (and then a number again). If so we're converting it to a float64.
-
- if p.Match(TokenSymbol, ".") != nil {
- // float64
- t2 := p.MatchType(TokenNumber)
- if t2 == nil {
- return nil, p.Error("Expected a number after the '.'.", nil)
- }
- f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- fr := &floatResolver{
- locationToken: t,
- val: f,
- }
- return fr, nil
- }
- i, err := strconv.Atoi(t.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- nr := &intResolver{
- locationToken: t,
- val: i,
- }
- return nr, nil
-
- case TokenString:
- p.Consume()
- sr := &stringResolver{
- locationToken: t,
- val: t.Val,
- }
- return sr, nil
- case TokenKeyword:
- p.Consume()
- switch t.Val {
- case "true":
- br := &boolResolver{
- locationToken: t,
- val: true,
- }
- return br, nil
- case "false":
- br := &boolResolver{
- locationToken: t,
- val: false,
- }
- return br, nil
- default:
- return nil, p.Error("This keyword is not allowed here.", nil)
- }
- }
-
- resolver := &variableResolver{
- locationToken: t,
- }
-
- // First part of a variable MUST be an identifier
- if t.Typ != TokenIdentifier {
- return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
- }
-
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t.Val,
- })
-
- p.Consume() // we consumed the first identifier of the variable name
-
-variableLoop:
- for p.Remaining() > 0 {
- t = p.Current()
-
- if p.Match(TokenSymbol, ".") != nil {
- // Next variable part (can be either NUMBER or IDENT)
- t2 := p.Current()
- if t2 != nil {
- switch t2.Typ {
- case TokenIdentifier:
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t2.Val,
- })
- p.Consume() // consume: IDENT
- continue variableLoop
- case TokenNumber:
- i, err := strconv.Atoi(t2.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t2)
- }
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeInt,
- i: i,
- })
- p.Consume() // consume: NUMBER
- continue variableLoop
- default:
- return nil, p.Error("This token is not allowed within a variable name.", t2)
- }
- } else {
- // EOF
- return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
- p.lastToken)
- }
- } else if p.Match(TokenSymbol, "(") != nil {
- // Function call
- // FunctionName '(' Comma-separated list of expressions ')'
- part := resolver.parts[len(resolver.parts)-1]
- part.isFunctionCall = true
- argumentLoop:
- for {
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
- }
-
- if p.Peek(TokenSymbol, ")") == nil {
- // No closing bracket, so we're parsing an expression
- exprArg, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- part.callingArgs = append(part.callingArgs, exprArg)
-
- if p.Match(TokenSymbol, ")") != nil {
- // If there's a closing bracket after an expression, we will stop parsing the arguments
- break argumentLoop
- } else {
- // If there's NO closing bracket, there MUST be an comma
- if p.Match(TokenSymbol, ",") == nil {
- return nil, p.Error("Missing comma or closing bracket after argument.", nil)
- }
- }
- } else {
- // We got a closing bracket, so stop parsing arguments
- p.Consume()
- break argumentLoop
- }
-
- }
- // We're done parsing the function call, next variable part
- continue variableLoop
- }
-
- // No dot or function call? Then we're done with the variable parsing
- break
- }
-
- return resolver, nil
-}
-
-func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
- v := &nodeFilteredVariable{
- locationToken: p.Current(),
- }
-
- // Parse the variable name
- resolver, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- v.resolver = resolver
-
- // Parse all the filters
-filterLoop:
- for p.Match(TokenSymbol, "|") != nil {
- // Parse one single filter
- filter, err := p.parseFilter()
- if err != nil {
- return nil, err
- }
-
- // Check sandbox filter restriction
- if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
- }
-
- v.filterChain = append(v.filterChain, filter)
-
- continue filterLoop
- }
-
- return v, nil
-}
-
-func (p *Parser) parseVariableElement() (INode, *Error) {
- node := &nodeVariable{
- locationToken: p.Current(),
- }
-
- p.Consume() // consume '{{'
-
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expr = expr
-
- if p.Match(TokenSymbol, "}}") == nil {
- return nil, p.Error("'}}' expected", nil)
- }
-
- return node, nil
-}
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 00000000..0ed62c1a
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,28 @@
+version: "2"
+
+run:
+ timeout: 1m
+ tests: true
+
+linters:
+ default: none
+ enable: # please keep this alphabetized
+ - asasalint
+ - asciicheck
+ - copyloopvar
+ - dupl
+ - errcheck
+ - forcetypeassert
+ - goconst
+ - gocritic
+ - govet
+ - ineffassign
+ - misspell
+ - musttag
+ - revive
+ - staticcheck
+ - unused
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 00000000..c3569600
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release. Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 00000000..5d37e294
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project. Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility. Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible. Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
similarity index 100%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
rename to vendor/github.com/go-logr/logr/LICENSE
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 00000000..7c7f0c69
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,407 @@
+# A minimal logging API for Go
+
+[](https://pkg.go.dev/github.com/go-logr/logr)
+[](https://goreportcard.com/report/github.com/go-logr/logr)
+[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation. This is not
+an implementation of logging - it is an API. In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors. It provides
+a relatively small API which can be used everywhere you want to emit logs. It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers. It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless. Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong. In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use. Something like:
+
+```
+ func main() {
+ // ... other setup code ...
+
+ // Create the "root" logger. We have chosen the "logimpl" implementation,
+ // which takes some initial parameters and returns a logr.Logger.
+ logger := logimpl.New(param1, param2)
+
+ // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc. The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed. For example:
+
+```
+ app := createTheAppObject(logger)
+ app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation. They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+ type appObject struct {
+ // ... other fields ...
+ logger logr.Logger
+ // ... other fields ...
+ }
+
+ func (app *appObject) Run() {
+ app.logger.Info("starting up", "timestamp", time.Now())
+
+ // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed. Alas, here we are.
+
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug." Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.Sink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.Valuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc., instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+ objects.) Structured logs allow you to preserve that structure when
+ outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+ a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T")` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+ [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+ more complex ones. Kubernetes is one example of a project that has
+ [adopted that
+ convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 00000000..1ca756fc
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 00000000..de8bcc3a
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 00000000..f012f9a1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 00000000..065ef0b8
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 00000000..99fe8be9
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it. It can be
+// used whenever the caller is not interested in the logs. Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+ return New(nil)
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 00000000..b22c57d7
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.AddCallDepth(1) // via Formatter
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []any) []any
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []any) []any
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []any) []any
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.AddName(name) // via Formatter
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.AddValues(kvList) // via Formatter
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.AddCallDepth(depth) // via Formatter
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []any
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{') // for the whole record
+ }
+
+ // Render builtins
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
+ }
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
+ }
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
+ }
+
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ copied := false
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(f.comma())
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputing as JSON make sure this isn't really a json.RawMessage.
+ // If so just emit "as-is" and don't pretty it as that will just print
+ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"<unknown>", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr any
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 00000000..7bd84761
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 00000000..b4428e10
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// # Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//
+// log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//
+// logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// # Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
+//
+// We can write:
+//
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+// logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// # Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it, the methods
+// will never crash. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
+// # Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// # Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
+func New(sink LogSink) Logger {
+ logger := Logger{}
+ logger.setSink(sink)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
+ return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+ l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+ return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+ l.setSink(sink)
+ return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+ sink LogSink
+ level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
+ return l.sink != nil && l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if l.sink.Enabled(l.level) { // see comment in Enabled
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Info(l.level, msg, keysAndValues...)
+ }
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentation for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if level < 0 {
+ level = 0
+ }
+ l.level += level
+ return l
+}
+
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+ // 0 if l.sink nil because of the if check in V above.
+ return l.level
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithValues(keysAndValues...))
+ return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls with WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithName(name))
+ return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(depth))
+ }
+ return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
+ var helper func()
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(1))
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ helper = withHelper.GetCallStackHelper()
+ } else {
+ helper = func() {}
+ }
+ return helper, l
+}
+
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
+// RuntimeInfo holds information that the logr "core" library knows which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+ // CallDepth is the number of call frames the logr library adds between the
+ // end-user and the LogSink. LogSink implementations which choose to print
+ // the original logging site (e.g. file & line) should climb this many
+ // additional frames to find it.
+ CallDepth int
+}
+
+// runtimeInfo is a static global. It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+ CallDepth: 1,
+}
+
+// LogSink represents a logging implementation. End-users will generally not
+// interact with this type.
+type LogSink interface {
+ // Init receives optional information about the logr library for LogSink
+ // implementations that need it.
+ Init(info RuntimeInfo)
+
+ // Enabled tests whether this LogSink is enabled at the specified V-level.
+ // For example, commandline flags might be used to set the logging
+ // verbosity and disable some info logs.
+ Enabled(level int) bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ // The level argument is provided for optional logging. This method will
+ // only be called when Enabled(level) is true. See Logger.Info for more
+ // details.
+ Info(level int, msg string, keysAndValues ...any)
+
+ // Error logs an error, with the given message and key/value pairs as
+ // context. See Logger.Error for more details.
+ Error(err error, msg string, keysAndValues ...any)
+
+ // WithValues returns a new LogSink with additional key/value pairs. See
+ // Logger.WithValues for more details.
+ WithValues(keysAndValues ...any) LogSink
+
+ // WithName returns a new LogSink with the specified name appended. See
+ // Logger.WithName for more details.
+ WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+ // WithCallDepth returns a LogSink that will offset the call
+ // stack by the specified number of frames when logging call
+ // site information.
+ //
+ // If depth is 0, the LogSink should skip exactly the number
+ // of call frames defined in RuntimeInfo.CallDepth when Info
+ // or Error are called, i.e. the attribution should be to the
+ // direct caller of Logger.Info or Logger.Error.
+ //
+ // If depth is 1 the attribution should skip 1 call frame, and so on.
+ // Successive calls to this are additive.
+ WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a LogSink that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+ // GetCallStackHelper returns a function that must be called
+ // to mark the direct caller as helper function when logging
+ // call site information.
+ GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+ // MarshalLog can be used to:
+ // - ensure that structs are not logged as strings when the original
+ // value has a String method: return a different type without a
+ // String method
+ // - select which fields of a complex type should get logged:
+ // return a simpler struct with fewer fields
+ // - log unexported fields: return a different struct
+ // with exported fields
+ //
+ // It may return any value of any type.
+ MarshalLog() any
+}
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 00000000..82d1ba49
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 00000000..28a83d02
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+// logger :=
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 00000000..4060fcbc
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by NewFromLogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 00000000..51586678
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 00000000..93a8aab5
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+ "log"
+ "os"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+ old := globalVerbosity
+ globalVerbosity = v
+ return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+func New(std StdLogger) logr.Logger {
+ return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+ if std == nil {
+ // Go's log.Default() is only available in 1.16 and higher.
+ std = log.New(os.Stderr, "", log.LstdFlags)
+ }
+
+ if opts.Depth < 0 {
+ opts.Depth = 0
+ }
+
+ fopts := funcr.Options{
+ LogCaller: funcr.MessageClass(opts.LogCaller),
+ }
+
+ sl := &logger{
+ Formatter: funcr.NewFormatter(fopts),
+ std: std,
+ }
+
+ // For skipping our own logger.Info/Error.
+ sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+ return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // Depth biases the assumed number of call frames to the "true" caller.
+ // This is useful when the calling code calls a function which then calls
+ // stdr (e.g. a logging shim to another API). Values less than zero will
+ // be treated as zero.
+ Depth int
+
+ // LogCaller tells stdr to add a "caller" key to some or all log lines.
+ // Go's log package has options to log this natively, too.
+ LogCaller MessageClass
+
+ // TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+ // Output is the same as log.Output and log.Logger.Output.
+ Output(calldepth int, logline string) error
+}
+
+type logger struct {
+ funcr.Formatter
+ std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+ return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of way to test
+// type conversion.
+type Underlier interface {
+ GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+ return l.std
+}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE
deleted file mode 100644
index 67c4fb56..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/LICENSE
+++ /dev/null
@@ -1,187 +0,0 @@
-Copyright © 2014, Roger Peppe, Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go
deleted file mode 100644
index 32e94721..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/bakery.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package bakery
-
-import (
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Bakery is a convenience type that contains both an Oven
-// and a Checker.
-type Bakery struct {
- Oven *Oven
- Checker *Checker
-}
-
-// BakeryParams holds a selection of parameters for the Oven
-// and the Checker created by New.
-//
-// For more fine-grained control of parameters, create the
-// Oven or Checker directly.
-//
-// The zero value is OK to use, but won't allow any authentication
-// or third party caveats to be added.
-type BakeryParams struct {
- // Logger is used to send log messages. If it is nil,
- // nothing will be logged.
- Logger Logger
-
- // Checker holds the checker used to check first party caveats.
- // If this is nil, New will use checkers.New(nil).
- Checker FirstPartyCaveatChecker
-
- // RootKeyStore holds the root key store to use. If you need to
- // use a different root key store for different operations,
- // you'll need to pass a RootKeyStoreForOps value to NewOven
- // directly.
- //
- // If this is nil, New will use NewMemRootKeyStore().
- // Note that that is almost certain insufficient for production services
- // that are spread across multiple instances or that need
- // to persist keys across restarts.
- RootKeyStore RootKeyStore
-
- // Locator is used to find out information on third parties when
- // adding third party caveats. If this is nil, no non-local third
- // party caveats can be added.
- Locator ThirdPartyLocator
-
- // Key holds the private key of the oven. If this is nil,
- // no third party caveats may be added.
- Key *KeyPair
-
- // OpsAuthorizer is used to check whether operations are authorized
- // by some other already-authorized operation. If it is nil,
- // NewChecker will assume no operation is authorized by any
- // operation except itself.
- OpsAuthorizer OpsAuthorizer
-
- // Location holds the location to use when creating new macaroons.
- Location string
-
- // LegacyMacaroonOp holds the operation to associate with old
- // macaroons that don't have associated operations.
- // If this is empty, legacy macaroons will not be associated
- // with any operations.
- LegacyMacaroonOp Op
-}
-
-// New returns a new Bakery instance which combines an Oven with a
-// Checker for the convenience of callers that wish to use both
-// together.
-func New(p BakeryParams) *Bakery {
- if p.Checker == nil {
- p.Checker = checkers.New(nil)
- }
- ovenParams := OvenParams{
- Key: p.Key,
- Namespace: p.Checker.Namespace(),
- Location: p.Location,
- Locator: p.Locator,
- LegacyMacaroonOp: p.LegacyMacaroonOp,
- }
- if p.RootKeyStore != nil {
- ovenParams.RootKeyStoreForOps = func(ops []Op) RootKeyStore {
- return p.RootKeyStore
- }
- }
- oven := NewOven(ovenParams)
-
- checker := NewChecker(CheckerParams{
- Checker: p.Checker,
- MacaroonVerifier: oven,
- OpsAuthorizer: p.OpsAuthorizer,
- })
- return &Bakery{
- Oven: oven,
- Checker: checker,
- }
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go
deleted file mode 100644
index b864e2b1..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checker.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package bakery
-
-import (
- "context"
- "sort"
- "sync"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Op holds an entity and action to be authorized on that entity.
-type Op struct {
- // Entity holds the name of the entity to be authorized.
- // Entity names should not contain spaces and should
- // not start with the prefix "login" or "multi-" (conventionally,
- // entity names will be prefixed with the entity type followed
- // by a hyphen.
- Entity string
-
- // Action holds the action to perform on the entity, such as "read"
- // or "delete". It is up to the service using a checker to define
- // a set of operations and keep them consistent over time.
- Action string
-}
-
-// NoOp holds the empty operation, signifying no authorized
-// operation. This is always considered to be authorized.
-// See OpsAuthorizer for one place that it's used.
-var NoOp = Op{}
-
-// CheckerParams holds parameters for NewChecker.
-type CheckerParams struct {
- // Checker is used to check first party caveats when authorizing.
- // If this is nil NewChecker will use checkers.New(nil).
- Checker FirstPartyCaveatChecker
-
- // OpsAuthorizer is used to check whether operations are authorized
- // by some other already-authorized operation. If it is nil,
- // NewChecker will assume no operation is authorized by any
- // operation except itself.
- OpsAuthorizer OpsAuthorizer
-
- // MacaroonVerifier is used to verify macaroons.
- MacaroonVerifier MacaroonVerifier
-
- // Logger is used to log checker operations. If it is nil,
- // DefaultLogger("bakery") will be used.
- Logger Logger
-}
-
-// OpsAuthorizer is used to check whether an operation authorizes some other
-// operation. For example, a macaroon with an operation allowing general access to a service
-// might also grant access to a more specific operation.
-type OpsAuthorizer interface {
- // AuthorizeOp reports which elements of queryOps are authorized by
- // authorizedOp. On return, each element of the slice should represent
- // whether the respective element in queryOps has been authorized.
- // An empty returned slice indicates that no operations are authorized.
- // AuthorizeOps may also return third party caveats that apply to
- // the authorized operations. Access will only be authorized when
- // those caveats are discharged by the client.
- //
- // When not all operations can be authorized with the macaroons
- // supplied to Checker.Auth, the checker will call AuthorizeOps
- // with NoOp, because some operations might be authorized
- // regardless of authority. NoOp will always be the last
- // operation queried within any given Allow call.
- //
- // AuthorizeOps should only return an error if authorization cannot be checked
- // (for example because of a database access failure), not because
- // authorization was denied.
- AuthorizeOps(ctx context.Context, authorizedOp Op, queryOps []Op) ([]bool, []checkers.Caveat, error)
-}
-
-// AuthInfo information about an authorization decision.
-type AuthInfo struct {
- // Macaroons holds all the macaroons that were
- // passed to Auth.
- Macaroons []macaroon.Slice
-
- // Used records which macaroons were used in the
- // authorization decision. It holds one element for
- // each element of Macaroons. Macaroons that
- // were invalid or unnecessary will have a false entry.
- Used []bool
-
- // OpIndexes holds the index of each macaroon
- // that was used to authorize an operation.
- OpIndexes map[Op]int
-}
-
-// Conditions returns the first party caveat caveat conditions hat apply to
-// the given AuthInfo. This can be used to apply appropriate caveats
-// to capability macaroons granted via a Checker.Allow call.
-func (a *AuthInfo) Conditions() []string {
- var squasher caveatSquasher
- for i, ms := range a.Macaroons {
- if !a.Used[i] {
- continue
- }
- for _, m := range ms {
- for _, cav := range m.Caveats() {
- if len(cav.VerificationId) > 0 {
- continue
- }
- squasher.add(string(cav.Id))
- }
- }
- }
- return squasher.final()
-}
-
-// Checker wraps a FirstPartyCaveatChecker and adds authentication and authorization checks.
-//
-// It uses macaroons as authorization tokens but it is not itself responsible for
-// creating the macaroons - see the Oven type (TODO) for one way of doing that.
-type Checker struct {
- FirstPartyCaveatChecker
- p CheckerParams
-}
-
-// NewChecker returns a new Checker using the given parameters.
-func NewChecker(p CheckerParams) *Checker {
- if p.Checker == nil {
- p.Checker = checkers.New(nil)
- }
- if p.Logger == nil {
- p.Logger = DefaultLogger("bakery")
- }
- return &Checker{
- FirstPartyCaveatChecker: p.Checker,
- p: p,
- }
-}
-
-// Auth makes a new AuthChecker instance using the
-// given macaroons to inform authorization decisions.
-func (c *Checker) Auth(mss ...macaroon.Slice) *AuthChecker {
- return &AuthChecker{
- Checker: c,
- macaroons: mss,
- }
-}
-
-// AuthChecker authorizes operations with respect to a user's request.
-type AuthChecker struct {
- // Checker is used to check first party caveats.
- *Checker
- macaroons []macaroon.Slice
- // conditions holds the first party caveat conditions
- // that apply to each of the above macaroons.
- conditions [][]string
- initOnce sync.Once
- initError error
- initErrors []error
- // authIndexes holds for each potentially authorized operation
- // the indexes of the macaroons that authorize it.
- authIndexes map[Op][]int
-}
-
-func (a *AuthChecker) init(ctx context.Context) error {
- a.initOnce.Do(func() {
- a.initError = a.initOnceFunc(ctx)
- })
- return a.initError
-}
-
-func (a *AuthChecker) initOnceFunc(ctx context.Context) error {
- a.authIndexes = make(map[Op][]int)
- a.conditions = make([][]string, len(a.macaroons))
- for i, ms := range a.macaroons {
- ops, conditions, err := a.p.MacaroonVerifier.VerifyMacaroon(ctx, ms)
- if err != nil {
- if !isVerificationError(err) {
- return errgo.Notef(err, "cannot retrieve macaroon")
- }
- a.initErrors = append(a.initErrors, errgo.Mask(err))
- continue
- }
- a.p.Logger.Debugf(ctx, "macaroon %d has valid sig; ops %q, conditions %q", i, ops, conditions)
- // It's a valid macaroon (in principle - we haven't checked first party caveats).
- a.conditions[i] = conditions
- for _, op := range ops {
- a.authIndexes[op] = append(a.authIndexes[op], i)
- }
- }
- return nil
-}
-
-// Allowed returns an AuthInfo that provides information on all
-// operations directly authorized by the macaroons provided
-// to Checker.Auth. Note that this does not include operations that would be indirectly
-// allowed via the OpAuthorizer.
-//
-// Allowed returns an error only when there is an underlying storage failure,
-// not when operations are not authorized.
-func (a *AuthChecker) Allowed(ctx context.Context) (*AuthInfo, error) {
- actx, err := a.newAllowContext(ctx, nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- for op, mindexes := range a.authIndexes {
- for _, mindex := range mindexes {
- if actx.status[mindex]&statusOK != 0 {
- actx.status[mindex] |= statusUsed
- actx.opIndexes[op] = mindex
- break
- }
- }
- }
- return actx.newAuthInfo(), nil
-}
-
-func (a *allowContext) newAuthInfo() *AuthInfo {
- info := &AuthInfo{
- Macaroons: a.checker.macaroons,
- Used: make([]bool, len(a.checker.macaroons)),
- OpIndexes: a.opIndexes,
- }
- for i, status := range a.status {
- if status&statusUsed != 0 {
- info.Used[i] = true
- }
- }
- return info
-}
-
-// allowContext holds temporary state used by AuthChecker.allowAny.
-type allowContext struct {
- checker *AuthChecker
-
- // status holds used and authorized status of all the
- // request macaroons.
- status []macaroonStatus
-
- // opIndex holds an entry for each authorized operation
- // that refers to the macaroon that authorized that operation.
- opIndexes map[Op]int
-
- // authed holds which of the requested operations have
- // been authorized so far.
- authed []bool
-
- // need holds all of the requested operations that
- // are remaining to be authorized. needIndex holds the
- // index of each of these operations in the original operations slice
- need []Op
- needIndex []int
-
- // errors holds any errors encountered during authorization.
- errors []error
-}
-
-type macaroonStatus uint8
-
-const (
- statusOK = 1 << iota
- statusUsed
-)
-
-func (a *AuthChecker) newAllowContext(ctx context.Context, ops []Op) (*allowContext, error) {
- actx := &allowContext{
- checker: a,
- status: make([]macaroonStatus, len(a.macaroons)),
- authed: make([]bool, len(ops)),
- need: append([]Op(nil), ops...),
- needIndex: make([]int, len(ops)),
- opIndexes: make(map[Op]int),
- }
- for i := range actx.needIndex {
- actx.needIndex[i] = i
- }
- if err := a.init(ctx); err != nil {
- return actx, errgo.Mask(err)
- }
- // Check all the macaroons with respect to the current context.
- // Technically this is more than we need to do, because some
- // of the macaroons might not authorize the specific operations
- // we're interested in, but that's an optimisation that could happen
- // later if performance becomes an issue with respect to that.
-outer:
- for i, ms := range a.macaroons {
- ctx := checkers.ContextWithMacaroons(ctx, a.Namespace(), ms)
- for _, cond := range a.conditions[i] {
- if err := a.CheckFirstPartyCaveat(ctx, cond); err != nil {
- actx.addError(err)
- continue outer
- }
- }
- actx.status[i] = statusOK
- }
- return actx, nil
-}
-
-// Macaroons returns the macaroons that were passed
-// to Checker.Auth when creating the AuthChecker.
-func (a *AuthChecker) Macaroons() []macaroon.Slice {
- return a.macaroons
-}
-
-// Allow checks that the authorizer's request is authorized to
-// perform all the given operations.
-//
-// If all the operations are allowed, an AuthInfo is returned holding
-// details of the decision.
-//
-// If an operation was not allowed, an error will be returned which may
-// be *DischargeRequiredError holding the operations that remain to
-// be authorized in order to allow authorization to
-// proceed.
-func (a *AuthChecker) Allow(ctx context.Context, ops ...Op) (*AuthInfo, error) {
- actx, err := a.newAllowContext(ctx, ops)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- actx.checkDirect(ctx)
- if len(actx.need) == 0 {
- return actx.newAuthInfo(), nil
- }
- caveats, err := actx.checkIndirect(ctx)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if len(actx.need) == 0 && len(caveats) == 0 {
- // No more ops need to be authenticated and no caveats to be discharged.
- return actx.newAuthInfo(), nil
- }
- a.p.Logger.Debugf(ctx, "operations still needed after auth check: %#v", actx.need)
- if len(caveats) == 0 || len(actx.need) > 0 {
- allErrors := make([]error, 0, len(a.initErrors)+len(actx.errors))
- allErrors = append(allErrors, a.initErrors...)
- allErrors = append(allErrors, actx.errors...)
- var err error
- if len(allErrors) > 0 {
- // TODO return all errors?
- a.p.Logger.Infof(ctx, "all auth errors: %q", allErrors)
- err = allErrors[0]
- }
- return nil, errgo.WithCausef(err, ErrPermissionDenied, "")
- }
- return nil, &DischargeRequiredError{
- Message: "some operations have extra caveats",
- Ops: ops,
- Caveats: caveats,
- }
-}
-
-// checkDirect checks which operations are directly authorized by
-// the macaroon operations.
-func (a *allowContext) checkDirect(ctx context.Context) {
- defer a.updateNeed()
- for i, op := range a.need {
- if op == NoOp {
- // NoOp is always authorized.
- a.authed[a.needIndex[i]] = true
- continue
- }
- for _, mindex := range a.checker.authIndexes[op] {
- if a.status[mindex]&statusOK != 0 {
- a.authed[a.needIndex[i]] = true
- a.status[mindex] |= statusUsed
- a.opIndexes[op] = mindex
- break
- }
- }
- }
-}
-
-// checkIndirect checks to see if any of the remaining operations are authorized
-// indirectly with the already-authorized operations.
-func (a *allowContext) checkIndirect(ctx context.Context) ([]checkers.Caveat, error) {
- if a.checker.p.OpsAuthorizer == nil {
- return nil, nil
- }
- var allCaveats []checkers.Caveat
- for op, mindexes := range a.checker.authIndexes {
- if len(a.need) == 0 {
- break
- }
- for _, mindex := range mindexes {
- if a.status[mindex]&statusOK == 0 {
- continue
- }
- ctx := checkers.ContextWithMacaroons(ctx, a.checker.Namespace(), a.checker.macaroons[mindex])
- authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, op, a.need)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- // TODO we could perhaps combine identical third party caveats here.
- allCaveats = append(allCaveats, caveats...)
- for i, ok := range authedOK {
- if !ok {
- continue
- }
- // Operation is authorized. Mark the appropriate macaroon as used,
- // and remove the operation from the needed list so that we don't
- // bother AuthorizeOps with it again.
- a.status[mindex] |= statusUsed
- a.authed[a.needIndex[i]] = true
- a.opIndexes[a.need[i]] = mindex
- }
- }
- a.updateNeed()
- }
- if len(a.need) == 0 {
- return allCaveats, nil
- }
- // We've still got at least one operation unauthorized.
- // Try to see if it can be authorized with no operation at all.
- authedOK, caveats, err := a.checker.p.OpsAuthorizer.AuthorizeOps(ctx, NoOp, a.need)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- allCaveats = append(allCaveats, caveats...)
- for i, ok := range authedOK {
- if ok {
- a.authed[a.needIndex[i]] = true
- }
- }
- a.updateNeed()
- return allCaveats, nil
-}
-
-// updateNeed removes all authorized operations from a.need
-// and updates a.needIndex appropriately too.
-func (a *allowContext) updateNeed() {
- j := 0
- for i, opIndex := range a.needIndex {
- if a.authed[opIndex] {
- continue
- }
- if i != j {
- a.need[j], a.needIndex[j] = a.need[i], a.needIndex[i]
- }
- j++
- }
- a.need, a.needIndex = a.need[0:j], a.needIndex[0:j]
-}
-
-func (a *allowContext) addError(err error) {
- a.errors = append(a.errors, err)
-}
-
-// caveatSquasher rationalizes first party caveats created for a capability
-// by:
-// - including only the earliest time-before caveat.
-// - removing duplicates.
-type caveatSquasher struct {
- expiry time.Time
- conds []string
-}
-
-func (c *caveatSquasher) add(cond string) {
- if c.add0(cond) {
- c.conds = append(c.conds, cond)
- }
-}
-
-func (c *caveatSquasher) add0(cond string) bool {
- cond, args, err := checkers.ParseCaveat(cond)
- if err != nil {
- // Be safe - if we can't parse the caveat, just leave it there.
- return true
- }
- if cond != checkers.CondTimeBefore {
- return true
- }
- et, err := time.Parse(time.RFC3339Nano, args)
- if err != nil || et.IsZero() {
- // Again, if it doesn't seem valid, leave it alone.
- return true
- }
- if c.expiry.IsZero() || et.Before(c.expiry) {
- c.expiry = et
- }
- return false
-}
-
-func (c *caveatSquasher) final() []string {
- if !c.expiry.IsZero() {
- c.conds = append(c.conds, checkers.TimeBeforeCaveat(c.expiry).Condition)
- }
- if len(c.conds) == 0 {
- return nil
- }
- // Make deterministic and eliminate duplicates.
- sort.Strings(c.conds)
- prev := c.conds[0]
- j := 1
- for _, cond := range c.conds[1:] {
- if cond != prev {
- c.conds[j] = cond
- prev = cond
- j++
- }
- }
- c.conds = c.conds[:j]
- return c.conds
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go
deleted file mode 100644
index 153b31d2..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/checkers.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// The checkers package provides some standard first-party
-// caveat checkers and some primitives for combining them.
-package checkers
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
-
- "gopkg.in/errgo.v1"
-)
-
-// StdNamespace holds the URI of the standard checkers schema.
-const StdNamespace = "std"
-
-// Constants for all the standard caveat conditions.
-// First and third party caveat conditions are both defined here,
-// even though notionally they exist in separate name spaces.
-const (
- CondDeclared = "declared"
- CondTimeBefore = "time-before"
- CondError = "error"
-)
-
-const (
- CondNeedDeclared = "need-declared"
-)
-
-// Func is the type of a function used by Checker to check a caveat. The
-// cond parameter will hold the caveat condition including any namespace
-// prefix; the arg parameter will hold any additional caveat argument
-// text.
-type Func func(ctx context.Context, cond, arg string) error
-
-// CheckerInfo holds information on a registered checker.
-type CheckerInfo struct {
- // Check holds the actual checker function.
- Check Func
- // Prefix holds the prefix for the checker condition.
- Prefix string
- // Name holds the name of the checker condition.
- Name string
- // Namespace holds the namespace URI for the checker's
- // schema.
- Namespace string
-}
-
-var allCheckers = map[string]Func{
- CondTimeBefore: checkTimeBefore,
- CondDeclared: checkDeclared,
- CondError: checkError,
-}
-
-// NewEmpty returns a checker using the given namespace
-// that has no registered checkers.
-// If ns is nil, a new one will be created.
-func NewEmpty(ns *Namespace) *Checker {
- if ns == nil {
- ns = NewNamespace(nil)
- }
- return &Checker{
- namespace: ns,
- checkers: make(map[string]CheckerInfo),
- }
-}
-
-// RegisterStd registers all the standard checkers in the given checker.
-// If not present already, the standard checkers schema (StdNamespace) is
-// added to the checker's namespace with an empty prefix.
-func RegisterStd(c *Checker) {
- c.namespace.Register(StdNamespace, "")
- for cond, check := range allCheckers {
- c.Register(cond, StdNamespace, check)
- }
-}
-
-// New returns a checker with all the standard caveats checkers registered.
-// If ns is nil, a new one will be created.
-// The standard namespace is also added to ns if not present.
-func New(ns *Namespace) *Checker {
- c := NewEmpty(ns)
- RegisterStd(c)
- return c
-}
-
-// Checker holds a set of checkers for first party caveats.
-// It implements bakery.CheckFirstParty caveat.
-type Checker struct {
- namespace *Namespace
- checkers map[string]CheckerInfo
-}
-
-// Register registers the given condition in the given namespace URI
-// to be checked with the given check function.
-// It will panic if the namespace is not registered or
-// if the condition has already been registered.
-func (c *Checker) Register(cond, uri string, check Func) {
- if check == nil {
- panic(fmt.Errorf("nil check function registered for namespace %q when registering condition %q", uri, cond))
- }
- prefix, ok := c.namespace.Resolve(uri)
- if !ok {
- panic(fmt.Errorf("no prefix registered for namespace %q when registering condition %q", uri, cond))
- }
- if prefix == "" && strings.Contains(cond, ":") {
- panic(fmt.Errorf("caveat condition %q in namespace %q contains a colon but its prefix is empty", cond, uri))
- }
- fullCond := ConditionWithPrefix(prefix, cond)
- if info, ok := c.checkers[fullCond]; ok {
- panic(fmt.Errorf("checker for %q (namespace %q) already registered in namespace %q", fullCond, uri, info.Namespace))
- }
- c.checkers[fullCond] = CheckerInfo{
- Check: check,
- Namespace: uri,
- Name: cond,
- Prefix: prefix,
- }
-}
-
-// Info returns information on all the registered checkers, sorted by namespace
-// and then name.
-func (c *Checker) Info() []CheckerInfo {
- checkers := make([]CheckerInfo, 0, len(c.checkers))
- for _, c := range c.checkers {
- checkers = append(checkers, c)
- }
- sort.Sort(checkerInfoByName(checkers))
- return checkers
-}
-
-// Namespace returns the namespace associated with the
-// checker. It implements bakery.FirstPartyCaveatChecker.Namespace.
-func (c *Checker) Namespace() *Namespace {
- return c.namespace
-}
-
-// CheckFirstPartyCaveat implements bakery.FirstPartyCaveatChecker
-// by checking the caveat against all registered caveats conditions.
-func (c *Checker) CheckFirstPartyCaveat(ctx context.Context, cav string) error {
- cond, arg, err := ParseCaveat(cav)
- if err != nil {
- // If we can't parse it, perhaps it's in some other format,
- // return a not-recognised error.
- return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav)
- }
- cf, ok := c.checkers[cond]
- if !ok {
- return errgo.NoteMask(ErrCaveatNotRecognized, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any)
- }
- if err := cf.Check(ctx, cond, arg); err != nil {
- return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any)
- }
- return nil
-}
-
-var errBadCaveat = errgo.New("bad caveat")
-
-func checkError(ctx context.Context, _, arg string) error {
- return errBadCaveat
-}
-
-// ErrCaveatNotRecognized is the cause of errors returned
-// from caveat checkers when the caveat was not
-// recognized.
-var ErrCaveatNotRecognized = errgo.New("caveat not recognized")
-
-// Caveat represents a condition that must be true for a check to
-// complete successfully. If Location is non-empty, the caveat must be
-// discharged by a third party at the given location.
-// The Namespace field holds the namespace URI of the
-// condition - if it is non-empty, it will be converted to
-// a namespace prefix before adding to the macaroon.
-type Caveat struct {
- Condition string
- Namespace string
- Location string
-}
-
-// Condition builds a caveat condition from the given name and argument.
-func Condition(name, arg string) string {
- if arg == "" {
- return name
- }
- return name + " " + arg
-}
-
-func firstParty(name, arg string) Caveat {
- return Caveat{
- Condition: Condition(name, arg),
- Namespace: StdNamespace,
- }
-}
-
-// ParseCaveat parses a caveat into an identifier, identifying the
-// checker that should be used, and the argument to the checker (the
-// rest of the string).
-//
-// The identifier is taken from all the characters before the first
-// space character.
-func ParseCaveat(cav string) (cond, arg string, err error) {
- if cav == "" {
- return "", "", fmt.Errorf("empty caveat")
- }
- i := strings.IndexByte(cav, ' ')
- if i < 0 {
- return cav, "", nil
- }
- if i == 0 {
- return "", "", fmt.Errorf("caveat starts with space character")
- }
- return cav[0:i], cav[i+1:], nil
-}
-
-// ErrorCaveatf returns a caveat that will never be satisfied, holding
-// the given fmt.Sprintf formatted text as the text of the caveat.
-//
-// This should only be used for highly unusual conditions that are never
-// expected to happen in practice, such as a malformed key that is
-// conventionally passed as a constant. It's not a panic but you should
-// only use it in cases where a panic might possibly be appropriate.
-//
-// This mechanism means that caveats can be created without error
-// checking and a later systematic check at a higher level (in the
-// bakery package) can produce an error instead.
-func ErrorCaveatf(f string, a ...interface{}) Caveat {
- return firstParty(CondError, fmt.Sprintf(f, a...))
-}
-
-type checkerInfoByName []CheckerInfo
-
-func (c checkerInfoByName) Less(i, j int) bool {
- info0, info1 := &c[i], &c[j]
- if info0.Namespace != info1.Namespace {
- return info0.Namespace < info1.Namespace
- }
- return info0.Name < info1.Name
-}
-
-func (c checkerInfoByName) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-func (c checkerInfoByName) Len() int {
- return len(c)
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go
deleted file mode 100644
index f41d6c98..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/declared.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package checkers
-
-import (
- "context"
- "strings"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-type macaroonsKey struct{}
-
-type macaroonsValue struct {
- ns *Namespace
- ms macaroon.Slice
-}
-
-// ContextWithMacaroons returns the given context associated with a
-// macaroon slice and the name space to use to interpret caveats in
-// the macaroons.
-func ContextWithMacaroons(ctx context.Context, ns *Namespace, ms macaroon.Slice) context.Context {
- return context.WithValue(ctx, macaroonsKey{}, macaroonsValue{
- ns: ns,
- ms: ms,
- })
-}
-
-// MacaroonsFromContext returns the namespace and macaroons associated
-// with the context by ContextWithMacaroons. This can be used to
-// implement "structural" first-party caveats that are predicated on
-// the macaroons being validated.
-func MacaroonsFromContext(ctx context.Context) (*Namespace, macaroon.Slice) {
- v, _ := ctx.Value(macaroonsKey{}).(macaroonsValue)
- return v.ns, v.ms
-}
-
-// DeclaredCaveat returns a "declared" caveat asserting that the given key is
-// set to the given value. If a macaroon has exactly one first party
-// caveat asserting the value of a particular key, then InferDeclared
-// will be able to infer the value, and then DeclaredChecker will allow
-// the declared value if it has the value specified here.
-//
-// If the key is empty or contains a space, DeclaredCaveat
-// will return an error caveat.
-func DeclaredCaveat(key string, value string) Caveat {
- if strings.Contains(key, " ") || key == "" {
- return ErrorCaveatf("invalid caveat 'declared' key %q", key)
- }
- return firstParty(CondDeclared, key+" "+value)
-}
-
-// NeedDeclaredCaveat returns a third party caveat that
-// wraps the provided third party caveat and requires
-// that the third party must add "declared" caveats for
-// all the named keys.
-// TODO(rog) namespaces in third party caveats?
-func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat {
- if cav.Location == "" {
- return ErrorCaveatf("need-declared caveat is not third-party")
- }
- return Caveat{
- Location: cav.Location,
- Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition,
- }
-}
-
-func checkDeclared(ctx context.Context, _, arg string) error {
- parts := strings.SplitN(arg, " ", 2)
- if len(parts) != 2 {
- return errgo.Newf("declared caveat has no value")
- }
- ns, ms := MacaroonsFromContext(ctx)
- attrs := InferDeclared(ns, ms)
- val, ok := attrs[parts[0]]
- if !ok {
- return errgo.Newf("got %s=null, expected %q", parts[0], parts[1])
- }
- if val != parts[1] {
- return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1])
- }
- return nil
-}
-
-// InferDeclared retrieves any declared information from
-// the given macaroons and returns it as a key-value map.
-//
-// Information is declared with a first party caveat as created
-// by DeclaredCaveat.
-//
-// If there are two caveats that declare the same key with
-// different values, the information is omitted from the map.
-// When the caveats are later checked, this will cause the
-// check to fail.
-func InferDeclared(ns *Namespace, ms macaroon.Slice) map[string]string {
- var conditions []string
- for _, m := range ms {
- for _, cav := range m.Caveats() {
- if cav.Location == "" {
- conditions = append(conditions, string(cav.Id))
- }
- }
- }
- return InferDeclaredFromConditions(ns, conditions)
-}
-
-// InferDeclaredFromConditions is like InferDeclared except that
-// it is passed a set of first party caveat conditions rather than a set of macaroons.
-func InferDeclaredFromConditions(ns *Namespace, conds []string) map[string]string {
- var conflicts []string
- // If we can't resolve that standard namespace, then we'll look for
- // just bare "declared" caveats which will work OK for legacy
- // macaroons with no namespace.
- prefix, _ := ns.Resolve(StdNamespace)
- declaredCond := prefix + CondDeclared
-
- info := make(map[string]string)
- for _, cond := range conds {
- name, rest, _ := ParseCaveat(cond)
- if name != declaredCond {
- continue
- }
- parts := strings.SplitN(rest, " ", 2)
- if len(parts) != 2 {
- continue
- }
- key, val := parts[0], parts[1]
- if oldVal, ok := info[key]; ok && oldVal != val {
- conflicts = append(conflicts, key)
- continue
- }
- info[key] = val
- }
- for _, key := range conflicts {
- delete(info, key)
- }
- return info
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go
deleted file mode 100644
index 8fbc8f87..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/namespace.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package checkers
-
-import (
- "sort"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "gopkg.in/errgo.v1"
-)
-
-// Namespace holds maps from schema URIs to the
-// prefixes that are used to encode them in first party
-// caveats. Several different URIs may map to the same
-// prefix - this is usual when several different backwardly
-// compatible schema versions are registered.
-type Namespace struct {
- uriToPrefix map[string]string
-}
-
-// Equal reports whether ns2 encodes the same namespace
-// as the receiver.
-func (ns1 *Namespace) Equal(ns2 *Namespace) bool {
- if ns1 == ns2 || ns1 == nil || ns2 == nil {
- return ns1 == ns2
- }
- if len(ns1.uriToPrefix) != len(ns2.uriToPrefix) {
- return false
- }
- for k, v := range ns1.uriToPrefix {
- if ns2.uriToPrefix[k] != v {
- return false
- }
- }
- return true
-}
-
-// NewNamespace returns a new namespace with the
-// given initial contents. It will panic if any of the
-// URI keys or their associated prefix are invalid
-// (see IsValidSchemaURI and IsValidPrefix).
-func NewNamespace(uriToPrefix map[string]string) *Namespace {
- ns := &Namespace{
- uriToPrefix: make(map[string]string),
- }
- for uri, prefix := range uriToPrefix {
- ns.Register(uri, prefix)
- }
- return ns
-}
-
-// String returns the namespace representation as returned by
-// ns.MarshalText.
-func (ns *Namespace) String() string {
- data, _ := ns.MarshalText()
- return string(data)
-}
-
-// MarshalText implements encoding.TextMarshaler by
-// returning all the elements in the namespace sorted by
-// URI, joined to the associated prefix with a colon and
-// separated with spaces.
-func (ns *Namespace) MarshalText() ([]byte, error) {
- if ns == nil || len(ns.uriToPrefix) == 0 {
- return nil, nil
- }
- uris := make([]string, 0, len(ns.uriToPrefix))
- dataLen := 0
- for uri, prefix := range ns.uriToPrefix {
- uris = append(uris, uri)
- dataLen += len(uri) + 1 + len(prefix) + 1
- }
- sort.Strings(uris)
- data := make([]byte, 0, dataLen)
- for i, uri := range uris {
- if i > 0 {
- data = append(data, ' ')
- }
- data = append(data, uri...)
- data = append(data, ':')
- data = append(data, ns.uriToPrefix[uri]...)
- }
- return data, nil
-}
-
-func (ns *Namespace) UnmarshalText(data []byte) error {
- uriToPrefix := make(map[string]string)
- elems := strings.Fields(string(data))
- for _, elem := range elems {
- i := strings.LastIndex(elem, ":")
- if i == -1 {
- return errgo.Newf("no colon in namespace field %q", elem)
- }
- uri, prefix := elem[0:i], elem[i+1:]
- if !IsValidSchemaURI(uri) {
- // Currently this can't happen because the only invalid URIs
- // are those which contain a space
- return errgo.Newf("invalid URI %q in namespace field %q", uri, elem)
- }
- if !IsValidPrefix(prefix) {
- return errgo.Newf("invalid prefix %q in namespace field %q", prefix, elem)
- }
- if _, ok := uriToPrefix[uri]; ok {
- return errgo.Newf("duplicate URI %q in namespace %q", uri, data)
- }
- uriToPrefix[uri] = prefix
- }
- ns.uriToPrefix = uriToPrefix
- return nil
-}
-
-// EnsureResolved tries to resolve the given schema URI to a prefix and
-// returns the prefix and whether the resolution was successful. If the
-// URI hasn't been registered but a compatible version has, the
-// given URI is registered with the same prefix.
-func (ns *Namespace) EnsureResolved(uri string) (string, bool) {
- // TODO(rog) compatibility
- return ns.Resolve(uri)
-}
-
-// Resolve resolves the given schema URI to its registered prefix and
-// returns the prefix and whether the resolution was successful.
-//
-// If ns is nil, it is treated as if it were empty.
-//
-// Resolve does not mutate ns and may be called concurrently
-// with other non-mutating Namespace methods.
-func (ns *Namespace) Resolve(uri string) (string, bool) {
- if ns == nil {
- return "", false
- }
- prefix, ok := ns.uriToPrefix[uri]
- return prefix, ok
-}
-
-// ResolveCaveat resolves the given caveat by using
-// Resolve to map from its schema namespace to the appropriate prefix using
-// Resolve. If there is no registered prefix for the namespace,
-// it returns an error caveat.
-//
-// If ns.Namespace is empty or ns.Location is non-empty, it returns cav unchanged.
-//
-// If ns is nil, it is treated as if it were empty.
-//
-// ResolveCaveat does not mutate ns and may be called concurrently
-// with other non-mutating Namespace methods.
-func (ns *Namespace) ResolveCaveat(cav Caveat) Caveat {
- // TODO(rog) If a namespace isn't registered, try to resolve it by
- // resolving it to the latest compatible version that is
- // registered.
- if cav.Namespace == "" || cav.Location != "" {
- return cav
- }
- prefix, ok := ns.Resolve(cav.Namespace)
- if !ok {
- errCav := ErrorCaveatf("caveat %q in unregistered namespace %q", cav.Condition, cav.Namespace)
- if errCav.Namespace != cav.Namespace {
- prefix, _ = ns.Resolve(errCav.Namespace)
- }
- cav = errCav
- }
- if prefix != "" {
- cav.Condition = ConditionWithPrefix(prefix, cav.Condition)
- }
- cav.Namespace = ""
- return cav
-}
-
-// ConditionWithPrefix returns the given string prefixed by the
-// given prefix. If the prefix is non-empty, a colon
-// is used to separate them.
-func ConditionWithPrefix(prefix, condition string) string {
- if prefix == "" {
- return condition
- }
- return prefix + ":" + condition
-}
-
-// Register registers the given URI and associates it
-// with the given prefix. If the URI has already been registered,
-// this is a no-op.
-func (ns *Namespace) Register(uri, prefix string) {
- if !IsValidSchemaURI(uri) {
- panic(errgo.Newf("cannot register invalid URI %q (prefix %q)", uri, prefix))
- }
- if !IsValidPrefix(prefix) {
- panic(errgo.Newf("cannot register invalid prefix %q for URI %q", prefix, uri))
- }
- if _, ok := ns.uriToPrefix[uri]; !ok {
- ns.uriToPrefix[uri] = prefix
- }
-}
-
-func invalidSchemaRune(r rune) bool {
- return unicode.IsSpace(r)
-}
-
-// IsValidSchemaURI reports whether the given argument is suitable for
-// use as a namespace schema URI. It must be non-empty, a valid UTF-8
-// string and it must not contain white space.
-func IsValidSchemaURI(uri string) bool {
- // TODO more stringent requirements?
- return len(uri) > 0 &&
- utf8.ValidString(uri) &&
- strings.IndexFunc(uri, invalidSchemaRune) == -1
-}
-
-func invalidPrefixRune(r rune) bool {
- return r == ' ' || r == ':' || unicode.IsSpace(r)
-}
-
-func IsValidPrefix(prefix string) bool {
- return utf8.ValidString(prefix) && strings.IndexFunc(prefix, invalidPrefixRune) == -1
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go
deleted file mode 100644
index bd71cbbc..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers/time.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package checkers
-
-import (
- "context"
- "fmt"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-// Clock represents a clock that can be faked for testing purposes.
-type Clock interface {
- Now() time.Time
-}
-
-type timeKey struct{}
-
-func ContextWithClock(ctx context.Context, clock Clock) context.Context {
- if clock == nil {
- return ctx
- }
- return context.WithValue(ctx, timeKey{}, clock)
-}
-
-func clockFromContext(ctx context.Context) Clock {
- c, _ := ctx.Value(timeKey{}).(Clock)
- return c
-}
-
-func checkTimeBefore(ctx context.Context, _, arg string) error {
- var now time.Time
- if clock := clockFromContext(ctx); clock != nil {
- now = clock.Now()
- } else {
- now = time.Now()
- }
- t, err := time.Parse(time.RFC3339Nano, arg)
- if err != nil {
- return errgo.Mask(err)
- }
- if !now.Before(t) {
- return fmt.Errorf("macaroon has expired")
- }
- return nil
-}
-
-// TimeBeforeCaveat returns a caveat that specifies that
-// the time that it is checked should be before t.
-func TimeBeforeCaveat(t time.Time) Caveat {
- return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano))
-}
-
-// ExpiryTime returns the minimum time of any time-before caveats found
-// in the given slice and whether there were any such caveats found.
-//
-// The ns parameter is used to determine the standard namespace prefix - if
-// the standard namespace is not found, the empty prefix is assumed.
-func ExpiryTime(ns *Namespace, cavs []macaroon.Caveat) (time.Time, bool) {
- prefix, _ := ns.Resolve(StdNamespace)
- timeBeforeCond := ConditionWithPrefix(prefix, CondTimeBefore)
- var t time.Time
- var expires bool
- for _, cav := range cavs {
- cav := string(cav.Id)
- name, rest, _ := ParseCaveat(cav)
- if name != timeBeforeCond {
- continue
- }
- et, err := time.Parse(time.RFC3339Nano, rest)
- if err != nil {
- continue
- }
- if !expires || et.Before(t) {
- t = et
- expires = true
- }
- }
- return t, expires
-}
-
-// MacaroonsExpiryTime returns the minimum time of any time-before
-// caveats found in the given macaroons and whether there were
-// any such caveats found.
-func MacaroonsExpiryTime(ns *Namespace, ms macaroon.Slice) (time.Time, bool) {
- var t time.Time
- var expires bool
- for _, m := range ms {
- if et, ex := ExpiryTime(ns, m.Caveats()); ex {
- if !expires || et.Before(t) {
- t = et
- expires = true
- }
- }
- }
- return t, expires
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go
deleted file mode 100644
index fb76ba55..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/codec.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
-
- "golang.org/x/crypto/nacl/box"
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-type caveatRecord struct {
- RootKey []byte
- Condition string
-}
-
-// caveatJSON defines the format of a V1 JSON-encoded third party caveat id.
-type caveatJSON struct {
- ThirdPartyPublicKey *PublicKey
- FirstPartyPublicKey *PublicKey
- Nonce []byte
- Id string
-}
-
-// encodeCaveat encrypts a third-party caveat with the given condtion
-// and root key. The thirdPartyInfo key holds information about the
-// third party we're encrypting the caveat for; the key is the
-// public/private key pair of the party that's adding the caveat.
-//
-// The caveat will be encoded according to the version information
-// found in thirdPartyInfo.
-func encodeCaveat(
- condition string,
- rootKey []byte,
- thirdPartyInfo ThirdPartyInfo,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
- switch thirdPartyInfo.Version {
- case Version0, Version1:
- return encodeCaveatV1(condition, rootKey, &thirdPartyInfo.PublicKey, key)
- case Version2:
- return encodeCaveatV2(condition, rootKey, &thirdPartyInfo.PublicKey, key)
- default:
- // Version 3 or later - use V3.
- return encodeCaveatV3(condition, rootKey, &thirdPartyInfo.PublicKey, key, ns)
- }
-}
-
-// encodeCaveatV1 creates a JSON-encoded third-party caveat
-// with the given condtion and root key. The thirdPartyPubKey key
-// represents the public key of the third party we're encrypting
-// the caveat for; the key is the public/private key pair of the party
-// that's adding the caveat.
-func encodeCaveatV1(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
-) ([]byte, error) {
- var nonce [NonceLen]byte
- if _, err := rand.Read(nonce[:]); err != nil {
- return nil, errgo.Notef(err, "cannot generate random number for nonce")
- }
- plain := caveatRecord{
- RootKey: rootKey,
- Condition: condition,
- }
- plainData, err := json.Marshal(&plain)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal %#v", &plain)
- }
- sealed := box.Seal(nil, plainData, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey())
- id := caveatJSON{
- ThirdPartyPublicKey: thirdPartyPubKey,
- FirstPartyPublicKey: &key.Public,
- Nonce: nonce[:],
- Id: base64.StdEncoding.EncodeToString(sealed),
- }
- data, err := json.Marshal(id)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal %#v", id)
- }
- buf := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
- base64.StdEncoding.Encode(buf, data)
- return buf, nil
-}
-
-// encodeCaveatV2 creates a version 2 third-party caveat.
-func encodeCaveatV2(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
-) ([]byte, error) {
- return encodeCaveatV2V3(Version2, condition, rootKey, thirdPartyPubKey, key, nil)
-}
-
-// encodeCaveatV3 creates a version 3 third-party caveat.
-func encodeCaveatV3(
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
- return encodeCaveatV2V3(Version3, condition, rootKey, thirdPartyPubKey, key, ns)
-}
-
-const publicKeyPrefixLen = 4
-
-// version3CaveatMinLen holds an underestimate of the
-// minimum length of a version 3 caveat.
-const version3CaveatMinLen = 1 + 4 + 32 + 24 + box.Overhead + 1
-
-// encodeCaveatV3 creates a version 2 or version 3 third-party caveat.
-//
-// The format has the following packed binary fields (note
-// that all fields up to and including the nonce are the same
-// as the v2 format):
-//
-// version 2 or 3 [1 byte]
-// first 4 bytes of third-party Curve25519 public key [4 bytes]
-// first-party Curve25519 public key [32 bytes]
-// nonce [24 bytes]
-// encrypted secret part [rest of message]
-//
-// The encrypted part encrypts the following fields
-// with box.Seal:
-//
-// version 2 or 3 [1 byte]
-// length of root key [n: uvarint]
-// root key [n bytes]
-// length of encoded namespace [n: uvarint] (Version 3 only)
-// encoded namespace [n bytes] (Version 3 only)
-// condition [rest of encrypted part]
-func encodeCaveatV2V3(
- version Version,
- condition string,
- rootKey []byte,
- thirdPartyPubKey *PublicKey,
- key *KeyPair,
- ns *checkers.Namespace,
-) ([]byte, error) {
-
- var nsData []byte
- if version >= Version3 {
- data, err := ns.MarshalText()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- nsData = data
- }
- // dataLen is our estimate of how long the data will be.
- // As we always use append, this doesn't have to be strictly
- // accurate but it's nice to avoid allocations.
- dataLen := 0 +
- 1 + // version
- publicKeyPrefixLen +
- KeyLen +
- NonceLen +
- box.Overhead +
- 1 + // version
- uvarintLen(uint64(len(rootKey))) +
- len(rootKey) +
- uvarintLen(uint64(len(nsData))) +
- len(nsData) +
- len(condition)
-
- var nonce [NonceLen]byte = uuidGen.Next()
-
- data := make([]byte, 0, dataLen)
- data = append(data, byte(version))
- data = append(data, thirdPartyPubKey.Key[:publicKeyPrefixLen]...)
- data = append(data, key.Public.Key[:]...)
- data = append(data, nonce[:]...)
- secret := encodeSecretPartV2V3(version, condition, rootKey, nsData)
- return box.Seal(data, secret, &nonce, thirdPartyPubKey.boxKey(), key.Private.boxKey()), nil
-}
-
-// encodeSecretPartV2V3 creates a version 2 or version 3 secret part of the third party
-// caveat. The returned data is not encrypted.
-//
-// The format has the following packed binary fields:
-// version 2 or 3 [1 byte]
-// root key length [n: uvarint]
-// root key [n bytes]
-// namespace length [n: uvarint] (v3 only)
-// namespace [n bytes] (v3 only)
-// predicate [rest of message]
-func encodeSecretPartV2V3(version Version, condition string, rootKey, nsData []byte) []byte {
- data := make([]byte, 0, 1+binary.MaxVarintLen64+len(rootKey)+len(condition))
- data = append(data, byte(version)) // version
- data = appendUvarint(data, uint64(len(rootKey)))
- data = append(data, rootKey...)
- if version >= Version3 {
- data = appendUvarint(data, uint64(len(nsData)))
- data = append(data, nsData...)
- }
- data = append(data, condition...)
- return data
-}
-
-// decodeCaveat attempts to decode caveat by decrypting the encrypted part
-// using key.
-func decodeCaveat(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- if len(caveat) == 0 {
- return nil, errgo.New("empty third party caveat")
- }
- switch caveat[0] {
- case byte(Version2):
- return decodeCaveatV2V3(Version2, key, caveat)
- case byte(Version3):
- if len(caveat) < version3CaveatMinLen {
- // If it has the version 3 caveat tag and it's too short, it's
- // almost certainly an id, not an encrypted payload.
- return nil, errgo.Newf("caveat id payload not provided for caveat id %q", caveat)
- }
- return decodeCaveatV2V3(Version3, key, caveat)
- case 'e':
- // 'e' will be the first byte if the caveatid is a base64 encoded JSON object.
- return decodeCaveatV1(key, caveat)
- default:
- return nil, errgo.Newf("caveat has unsupported version %d", caveat[0])
- }
-}
-
-// decodeCaveatV1 attempts to decode a base64 encoded JSON id. This
-// encoding is nominally version -1.
-func decodeCaveatV1(key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- data := make([]byte, (3*len(caveat)+3)/4)
- n, err := base64.StdEncoding.Decode(data, caveat)
- if err != nil {
- return nil, errgo.Notef(err, "cannot base64-decode caveat")
- }
- data = data[:n]
- var wrapper caveatJSON
- if err := json.Unmarshal(data, &wrapper); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal caveat %q", data)
- }
- if !bytes.Equal(key.Public.Key[:], wrapper.ThirdPartyPublicKey.Key[:]) {
- return nil, errgo.New("public key mismatch")
- }
- if wrapper.FirstPartyPublicKey == nil {
- return nil, errgo.New("target service public key not specified")
- }
- // The encrypted string is base64 encoded in the JSON representation.
- secret, err := base64.StdEncoding.DecodeString(wrapper.Id)
- if err != nil {
- return nil, errgo.Notef(err, "cannot base64-decode encrypted data")
- }
- var nonce [NonceLen]byte
- if copy(nonce[:], wrapper.Nonce) < NonceLen {
- return nil, errgo.Newf("nonce too short %x", wrapper.Nonce)
- }
- c, ok := box.Open(nil, secret, &nonce, wrapper.FirstPartyPublicKey.boxKey(), key.Private.boxKey())
- if !ok {
- return nil, errgo.Newf("cannot decrypt caveat %#v", wrapper)
- }
- var record caveatRecord
- if err := json.Unmarshal(c, &record); err != nil {
- return nil, errgo.Notef(err, "cannot decode third party caveat record")
- }
- return &ThirdPartyCaveatInfo{
- Condition: []byte(record.Condition),
- FirstPartyPublicKey: *wrapper.FirstPartyPublicKey,
- ThirdPartyKeyPair: *key,
- RootKey: record.RootKey,
- Caveat: caveat,
- Version: Version1,
- Namespace: legacyNamespace(),
- }, nil
-}
-
-// decodeCaveatV2V3 decodes a version 2 or version 3 caveat.
-func decodeCaveatV2V3(version Version, key *KeyPair, caveat []byte) (*ThirdPartyCaveatInfo, error) {
- origCaveat := caveat
- if len(caveat) < 1+publicKeyPrefixLen+KeyLen+NonceLen+box.Overhead {
- return nil, errgo.New("caveat id too short")
- }
- caveat = caveat[1:] // skip version (already checked)
-
- publicKeyPrefix, caveat := caveat[:publicKeyPrefixLen], caveat[publicKeyPrefixLen:]
- if !bytes.Equal(key.Public.Key[:publicKeyPrefixLen], publicKeyPrefix) {
- return nil, errgo.New("public key mismatch")
- }
-
- var firstPartyPub PublicKey
- copy(firstPartyPub.Key[:], caveat[:KeyLen])
- caveat = caveat[KeyLen:]
-
- var nonce [NonceLen]byte
- copy(nonce[:], caveat[:NonceLen])
- caveat = caveat[NonceLen:]
-
- data, ok := box.Open(nil, caveat, &nonce, firstPartyPub.boxKey(), key.Private.boxKey())
- if !ok {
- return nil, errgo.Newf("cannot decrypt caveat id")
- }
- rootKey, ns, condition, err := decodeSecretPartV2V3(version, data)
- if err != nil {
- return nil, errgo.Notef(err, "invalid secret part")
- }
- return &ThirdPartyCaveatInfo{
- Condition: condition,
- FirstPartyPublicKey: firstPartyPub,
- ThirdPartyKeyPair: *key,
- RootKey: rootKey,
- Caveat: origCaveat,
- Version: version,
- Namespace: ns,
- }, nil
-}
-
-func decodeSecretPartV2V3(version Version, data []byte) (rootKey []byte, ns *checkers.Namespace, condition []byte, err error) {
- fail := func(err error) ([]byte, *checkers.Namespace, []byte, error) {
- return nil, nil, nil, err
- }
- if len(data) < 1 {
- return fail(errgo.New("secret part too short"))
- }
- gotVersion, data := data[0], data[1:]
- if version != Version(gotVersion) {
- return fail(errgo.Newf("unexpected secret part version, got %d want %d", gotVersion, version))
- }
-
- l, n := binary.Uvarint(data)
- if n <= 0 || uint64(n)+l > uint64(len(data)) {
- return fail(errgo.Newf("invalid root key length"))
- }
- data = data[n:]
- rootKey, data = data[:l], data[l:]
-
- if version >= Version3 {
- var nsData []byte
- var ns1 checkers.Namespace
-
- l, n = binary.Uvarint(data)
- if n <= 0 || uint64(n)+l > uint64(len(data)) {
- return fail(errgo.Newf("invalid namespace length"))
- }
- data = data[n:]
- nsData, data = data[:l], data[l:]
- if err := ns1.UnmarshalText(nsData); err != nil {
- return fail(errgo.Notef(err, "cannot unmarshal namespace"))
- }
- ns = &ns1
- } else {
- ns = legacyNamespace()
- }
- return rootKey, ns, data, nil
-}
-
-// appendUvarint appends n to data encoded as a variable-length
-// unsigned integer.
-func appendUvarint(data []byte, n uint64) []byte {
- // Ensure the capacity is sufficient. If our space calculations when
- // allocating data were correct, this should never happen,
- // but be defensive just in case.
- for need := uvarintLen(n); cap(data)-len(data) < need; {
- data1 := append(data[0:cap(data)], 0)
- data = data1[0:len(data)]
- }
- nlen := binary.PutUvarint(data[len(data):cap(data)], n)
- return data[0 : len(data)+nlen]
-}
-
-// uvarintLen returns the number of bytes that n will require
-// when encoded with binary.PutUvarint.
-func uvarintLen(n uint64) int {
- len := 1
- n >>= 7
- for ; n > 0; n >>= 7 {
- len++
- }
- return len
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go
deleted file mode 100644
index 4c7b0ae6..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/discharge.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package bakery
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "strconv"
- "strings"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// LocalThirdPartyCaveat returns a third-party caveat that, when added
-// to a macaroon with AddCaveat, results in a caveat
-// with the location "local", encrypted with the given public key.
-// This can be automatically discharged by DischargeAllWithKey.
-func LocalThirdPartyCaveat(key *PublicKey, version Version) checkers.Caveat {
- var loc string
- if version < Version2 {
- loc = "local " + key.String()
- } else {
- loc = fmt.Sprintf("local %d %s", version, key)
- }
- return checkers.Caveat{
- Location: loc,
- }
-}
-
-// parseLocalLocation parses a local caveat location as generated by
-// LocalThirdPartyCaveat. This is of the form:
-//
-// local
-//
-// where is the bakery version of the client that we're
-// adding the local caveat for.
-//
-// It returns false if the location does not represent a local
-// caveat location.
-func parseLocalLocation(loc string) (ThirdPartyInfo, bool) {
- if !strings.HasPrefix(loc, "local ") {
- return ThirdPartyInfo{}, false
- }
- version := Version1
- fields := strings.Fields(loc)
- fields = fields[1:] // Skip "local"
- switch len(fields) {
- case 2:
- v, err := strconv.Atoi(fields[0])
- if err != nil {
- return ThirdPartyInfo{}, false
- }
- version = Version(v)
- fields = fields[1:]
- fallthrough
- case 1:
- var key PublicKey
- if err := key.UnmarshalText([]byte(fields[0])); err != nil {
- return ThirdPartyInfo{}, false
- }
- return ThirdPartyInfo{
- PublicKey: key,
- Version: version,
- }, true
- default:
- return ThirdPartyInfo{}, false
- }
-}
-
-// DischargeParams holds parameters for a Discharge call.
-type DischargeParams struct {
- // Id holds the id to give to the discharge macaroon.
- // If Caveat is empty, then the id also holds the
- // encrypted third party caveat.
- Id []byte
-
- // Caveat holds the encrypted third party caveat. If this
- // is nil, Id will be used.
- Caveat []byte
-
- // Key holds the key to use to decrypt the third party
- // caveat information and to encrypt any additional
- // third party caveats returned by the caveat checker.
- Key *KeyPair
-
- // Checker is used to check the third party caveat,
- // and may also return further caveats to be added to
- // the discharge macaroon.
- Checker ThirdPartyCaveatChecker
-
- // Locator is used to information on third parties
- // referred to by third party caveats returned by the Checker.
- Locator ThirdPartyLocator
-}
-
-// Discharge creates a macaroon to discharges a third party caveat.
-// The given parameters specify the caveat and how it should be checked/
-//
-// The condition implicit in the caveat is checked for validity using p.Checker. If
-// it is valid, a new macaroon is returned which discharges the caveat.
-//
-// The macaroon is created with a version derived from the version
-// that was used to encode the id.
-func Discharge(ctx context.Context, p DischargeParams) (*Macaroon, error) {
- var caveatIdPrefix []byte
- if p.Caveat == nil {
- // The caveat information is encoded in the id itself.
- p.Caveat = p.Id
- } else {
- // We've been given an explicit id, so when extra third party
- // caveats are added, use that id as the prefix
- // for any more ids.
- caveatIdPrefix = p.Id
- }
- cavInfo, err := decodeCaveat(p.Key, p.Caveat)
- if err != nil {
- return nil, errgo.Notef(err, "discharger cannot decode caveat id")
- }
- cavInfo.Id = p.Id
- // Note that we don't check the error - we allow the
- // third party checker to see even caveats that we can't
- // understand.
- cond, arg, _ := checkers.ParseCaveat(string(cavInfo.Condition))
-
- var caveats []checkers.Caveat
- if cond == checkers.CondNeedDeclared {
- cavInfo.Condition = []byte(arg)
- caveats, err = checkNeedDeclared(ctx, cavInfo, p.Checker)
- } else {
- caveats, err = p.Checker.CheckThirdPartyCaveat(ctx, cavInfo)
- }
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- // Note that the discharge macaroon does not need to
- // be stored persistently. Indeed, it would be a problem if
- // we did, because then the macaroon could potentially be used
- // for normal authorization with the third party.
- m, err := NewMacaroon(cavInfo.RootKey, p.Id, "", cavInfo.Version, cavInfo.Namespace)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- m.caveatIdPrefix = caveatIdPrefix
- for _, cav := range caveats {
- if err := m.AddCaveat(ctx, cav, p.Key, p.Locator); err != nil {
- return nil, errgo.Notef(err, "could not add caveat")
- }
- }
- return m, nil
-}
-
-func checkNeedDeclared(ctx context.Context, cavInfo *ThirdPartyCaveatInfo, checker ThirdPartyCaveatChecker) ([]checkers.Caveat, error) {
- arg := string(cavInfo.Condition)
- i := strings.Index(arg, " ")
- if i <= 0 {
- return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg)
- }
- needDeclared := strings.Split(arg[0:i], ",")
- for _, d := range needDeclared {
- if d == "" {
- return nil, errgo.New("need-declared caveat with empty required attribute")
- }
- }
- if len(needDeclared) == 0 {
- return nil, fmt.Errorf("need-declared caveat with no required attributes")
- }
- cavInfo.Condition = []byte(arg[i+1:])
- caveats, err := checker.CheckThirdPartyCaveat(ctx, cavInfo)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- declared := make(map[string]bool)
- for _, cav := range caveats {
- if cav.Location != "" {
- continue
- }
- // Note that we ignore the error. We allow the service to
- // generate caveats that we don't understand here.
- cond, arg, _ := checkers.ParseCaveat(cav.Condition)
- if cond != checkers.CondDeclared {
- continue
- }
- parts := strings.SplitN(arg, " ", 2)
- if len(parts) != 2 {
- return nil, errgo.Newf("declared caveat has no value")
- }
- declared[parts[0]] = true
- }
- // Add empty declarations for everything mentioned in need-declared
- // that was not actually declared.
- for _, d := range needDeclared {
- if !declared[d] {
- caveats = append(caveats, checkers.DeclaredCaveat(d, ""))
- }
- }
- return caveats, nil
-}
-
-func randomBytes(n int) ([]byte, error) {
- b := make([]byte, n)
- _, err := rand.Read(b)
- if err != nil {
- return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err)
- }
- return b, nil
-}
-
-// ThirdPartyCaveatInfo holds the information decoded from
-// a third party caveat id.
-type ThirdPartyCaveatInfo struct {
- // Condition holds the third party condition to be discharged.
- // This is the only field that most third party dischargers will
- // need to consider.
- Condition []byte
-
- // FirstPartyPublicKey holds the public key of the party
- // that created the third party caveat.
- FirstPartyPublicKey PublicKey
-
- // ThirdPartyKeyPair holds the key pair used to decrypt
- // the caveat - the key pair of the discharging service.
- ThirdPartyKeyPair KeyPair
-
- // RootKey holds the secret root key encoded by the caveat.
- RootKey []byte
-
- // CaveatId holds the full encoded caveat id from which all
- // the other fields are derived.
- Caveat []byte
-
- // Version holds the version that was used to encode
- // the caveat id.
- Version Version
-
- // Id holds the id of the third party caveat (the id that
- // the discharge macaroon should be given). This
- // will differ from Caveat when the caveat information
- // is encoded separately.
- Id []byte
-
- // Namespace holds the namespace of the first party
- // that created the macaroon, as encoded by the party
- // that added the third party caveat.
- Namespace *checkers.Namespace
-}
-
-// ThirdPartyCaveatChecker holds a function that checks third party caveats
-// for validity. If the caveat is valid, it returns a nil error and
-// optionally a slice of extra caveats that will be added to the
-// discharge macaroon. The caveatId parameter holds the still-encoded id
-// of the caveat.
-//
-// If the caveat kind was not recognised, the checker should return an
-// error with a ErrCaveatNotRecognized cause.
-type ThirdPartyCaveatChecker interface {
- CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker by calling a function.
-type ThirdPartyCaveatCheckerFunc func(context.Context, *ThirdPartyCaveatInfo) ([]checkers.Caveat, error)
-
-// CheckThirdPartyCaveat implements ThirdPartyCaveatChecker.CheckThirdPartyCaveat by calling
-// the receiver with the given arguments
-func (c ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- return c(ctx, info)
-}
-
-// FirstPartyCaveatChecker is used to check first party caveats
-// for validity with respect to information in the provided context.
-//
-// If the caveat kind was not recognised, the checker should return
-// ErrCaveatNotRecognized.
-type FirstPartyCaveatChecker interface {
- // CheckFirstPartyCaveat checks that the given caveat condition
- // is valid with respect to the given context information.
- CheckFirstPartyCaveat(ctx context.Context, caveat string) error
-
- // Namespace returns the namespace associated with the
- // caveat checker.
- Namespace() *checkers.Namespace
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go
deleted file mode 100644
index 9c117ba8..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/dischargeall.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package bakery
-
-import (
- "context"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// DischargeAll gathers discharge macaroons for all the third party
-// caveats in m (and any subsequent caveats required by those) using
-// getDischarge to acquire each discharge macaroon. It returns a slice
-// with m as the first element, followed by all the discharge macaroons.
-// All the discharge macaroons will be bound to the primary macaroon.
-//
-// The getDischarge function is passed the caveat to be discharged;
-// encryptedCaveat will be passed the external caveat payload found
-// in m, if any.
-func DischargeAll(
- ctx context.Context,
- m *Macaroon,
- getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error),
-) (macaroon.Slice, error) {
- return DischargeAllWithKey(ctx, m, getDischarge, nil)
-}
-
-// DischargeAllWithKey is like DischargeAll except that the localKey
-// parameter may optionally hold the key of the client, in which case it
-// will be used to discharge any third party caveats with the special
-// location "local". In this case, the caveat itself must be "true". This
-// can be used be a server to ask a client to prove ownership of the
-// private key.
-//
-// When localKey is nil, DischargeAllWithKey is exactly the same as
-// DischargeAll.
-func DischargeAllWithKey(
- ctx context.Context,
- m *Macaroon,
- getDischarge func(ctx context.Context, cav macaroon.Caveat, encodedCaveat []byte) (*Macaroon, error),
- localKey *KeyPair,
-) (macaroon.Slice, error) {
- discharges, err := Slice{m}.DischargeAll(ctx, getDischarge, localKey)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- return discharges.Bind(), nil
-}
-
-var localDischargeChecker = ThirdPartyCaveatCheckerFunc(func(_ context.Context, info *ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- if string(info.Condition) != "true" {
- return nil, checkers.ErrCaveatNotRecognized
- }
- return nil, nil
-})
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go
deleted file mode 100644
index f58f699d..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/doc.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// The bakery package layers on top of the macaroon package, providing
-// a transport and store-agnostic way of using macaroons to assert
-// client capabilities.
-//
-// Summary
-//
-// The Bakery type is probably where you want to start.
-// It encapsulates a Checker type, which performs checking
-// of operations, and an Oven type, which encapsulates
-// the actual details of the macaroon encoding conventions.
-//
-// Most other types and functions are designed either to plug
-// into one of the above types (the various Authorizer
-// implementations, for example), or to expose some independent
-// functionality that's potentially useful (Discharge, for example).
-//
-// The rest of this introduction introduces some of the concepts
-// used by the bakery package.
-//
-// Identity and entities
-//
-// An Identity represents some authenticated user (or agent), usually
-// the client in a network protocol. An identity can be authenticated by
-// an external identity server (with a third party macaroon caveat) or
-// by locally provided information such as a username and password.
-//
-// The Checker type is not responsible for determining identity - that
-// functionality is represented by the IdentityClient interface.
-//
-// The Checker uses identities to decide whether something should be
-// allowed or not - the Authorizer interface is used to ask whether a
-// given identity should be allowed to perform some set of operations.
-//
-// Operations
-//
-// An operation defines some requested action on an entity. For example,
-// if file system server defines an entity for every file in the server,
-// an operation to read a file might look like:
-//
-// Op{
-// Entity: "/foo",
-// Action: "write",
-// }
-//
-// The exact set of entities and actions is up to the caller, but should
-// be kept stable over time because authorization tokens will contain
-// these names.
-//
-// To authorize some request on behalf of a remote user, first find out
-// what operations that request needs to perform. For example, if the
-// user tries to delete a file, the entity might be the path to the
-// file's directory and the action might be "write". It may often be
-// possible to determine the operations required by a request without
-// reference to anything external, when the request itself contains all
-// the necessary information.
-//
-// The LoginOp operation is special - any macaroon associated with this
-// operation is treated as a bearer of identity information. If two
-// valid LoginOp macaroons are presented, only the first one will be
-// used for identity.
-//
-// Authorization
-//
-// The Authorizer interface is responsible for determining whether a
-// given authenticated identity is authorized to perform a set of
-// operations. This is used when the macaroons provided to Auth are not
-// sufficient to authorize the operations themselves.
-//
-// Capabilities
-//
-// A "capability" is represented by a macaroon that's associated with
-// one or more operations, and grants the capability to perform all
-// those operations. The AllowCapability method reports whether a
-// capability is allowed. It takes into account any authenticated
-// identity and any other capabilities provided.
-//
-// Third party caveats
-//
-// Sometimes authorization will only be granted if a third party caveat
-// is discharged. This will happen when an IdentityClient or Authorizer
-// returns a third party caveat.
-//
-// When this happens, a DischargeRequiredError will be returned
-// containing the caveats and the operations required. The caller is
-// responsible for creating a macaroon with those caveats associated
-// with those operations and for passing that macaroon to the client to
-// discharge.
-package bakery
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go
deleted file mode 100644
index 1a059d59..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/error.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package bakery
-
-import (
- "fmt"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-var (
- // ErrNotFound is returned by Store.Get implementations
- // to signal that an id has not been found.
- ErrNotFound = errgo.New("not found")
-
- // ErrPermissionDenied is returned from AuthChecker when
- // permission has been denied.
- ErrPermissionDenied = errgo.New("permission denied")
-)
-
-// DischargeRequiredError is returned when authorization has failed and a
-// discharged macaroon might fix it.
-//
-// A caller should grant the user the ability to authorize by minting a
-// macaroon associated with Ops (see MacaroonStore.MacaroonIdInfo for
-// how the associated operations are retrieved) and adding Caveats. If
-// the user succeeds in discharging the caveats, the authorization will
-// be granted.
-type DischargeRequiredError struct {
- // Message holds some reason why the authorization was denied.
- // TODO this is insufficient (and maybe unnecessary) because we
- // can have multiple errors.
- Message string
-
- // Ops holds all the operations that were not authorized.
- // If Ops contains a single LoginOp member, the macaroon
- // should be treated as an login token. Login tokens (also
- // known as authentication macaroons) usually have a longer
- // life span than other macaroons.
- Ops []Op
-
- // Caveats holds the caveats that must be added
- // to macaroons that authorize the above operations.
- Caveats []checkers.Caveat
-
- // ForAuthentication holds whether the macaroon holding
- // the discharges will be used for authentication, and hence
- // should have wider scope and longer lifetime.
- // The bakery package never sets this field, but bakery/identchecker
- // uses it.
- ForAuthentication bool
-}
-
-func (e *DischargeRequiredError) Error() string {
- return "macaroon discharge required: " + e.Message
-}
-
-func IsDischargeRequiredError(err error) bool {
- _, ok := err.(*DischargeRequiredError)
- return ok
-}
-
-// VerificationError is used to signify that an error is because
-// of a verification failure rather than because verification
-// could not be done.
-type VerificationError struct {
- Reason error
-}
-
-func (e *VerificationError) Error() string {
- return fmt.Sprintf("verification failed: %v", e.Reason)
-}
-
-func isVerificationError(err error) bool {
- _, ok := errgo.Cause(err).(*VerificationError)
- return ok
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go
deleted file mode 100644
index 7cffa9f3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/keys.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package bakery
-
-import (
- "context"
- "crypto/rand"
- "encoding/base64"
- "encoding/json"
- "strings"
- "sync"
-
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/nacl/box"
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-)
-
-// KeyLen is the byte length of the Ed25519 public and private keys used for
-// caveat id encryption.
-const KeyLen = 32
-
-// NonceLen is the byte length of the nonce values used for caveat id
-// encryption.
-const NonceLen = 24
-
-// PublicKey is a 256-bit Ed25519 public key.
-type PublicKey struct {
- Key
-}
-
-// PrivateKey is a 256-bit Ed25519 private key.
-type PrivateKey struct {
- Key
-}
-
-// Public derives the public key from a private key.
-func (k PrivateKey) Public() PublicKey {
- var pub PublicKey
- curve25519.ScalarBaseMult((*[32]byte)(&pub.Key), (*[32]byte)(&k.Key))
- return pub
-}
-
-// Key is a 256-bit Ed25519 key.
-type Key [KeyLen]byte
-
-// String returns the base64 representation of the key.
-func (k Key) String() string {
- return base64.StdEncoding.EncodeToString(k[:])
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.MarshalBinary.
-func (k Key) MarshalBinary() ([]byte, error) {
- return k[:], nil
-}
-
-// isZero reports whether the key consists entirely of zeros.
-func (k Key) isZero() bool {
- return k == Key{}
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.UnmarshalBinary.
-func (k *Key) UnmarshalBinary(data []byte) error {
- if len(data) != len(k) {
- return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k))
- }
- copy(k[:], data)
- return nil
-}
-
-// MarshalText implements encoding.TextMarshaler.MarshalText.
-func (k Key) MarshalText() ([]byte, error) {
- data := make([]byte, base64.StdEncoding.EncodedLen(len(k)))
- base64.StdEncoding.Encode(data, k[:])
- return data, nil
-}
-
-// boxKey returns the box package's type for a key.
-func (k Key) boxKey() *[KeyLen]byte {
- return (*[KeyLen]byte)(&k)
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.UnmarshalText.
-func (k *Key) UnmarshalText(text []byte) error {
- data, err := macaroon.Base64Decode(text)
- if err != nil {
- return errgo.Notef(err, "cannot decode base64 key")
- }
- if len(data) != len(k) {
- return errgo.Newf("wrong length for key, got %d want %d", len(data), len(k))
- }
- copy(k[:], data)
- return nil
-}
-
-// ThirdPartyInfo holds information on a given third party
-// discharge service.
-type ThirdPartyInfo struct {
- // PublicKey holds the public key of the third party.
- PublicKey PublicKey
-
- // Version holds latest the bakery protocol version supported
- // by the discharger.
- Version Version
-}
-
-// ThirdPartyLocator is used to find information on third
-// party discharge services.
-type ThirdPartyLocator interface {
- // ThirdPartyInfo returns information on the third
- // party at the given location. It returns ErrNotFound if no match is found.
- // This method must be safe to call concurrently.
- ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error)
-}
-
-// ThirdPartyStore implements a simple ThirdPartyLocator.
-// A trailing slash on locations is ignored.
-type ThirdPartyStore struct {
- mu sync.RWMutex
- m map[string]ThirdPartyInfo
-}
-
-// NewThirdPartyStore returns a new instance of ThirdPartyStore
-// that stores locations in memory.
-func NewThirdPartyStore() *ThirdPartyStore {
- return &ThirdPartyStore{
- m: make(map[string]ThirdPartyInfo),
- }
-}
-
-// AddInfo associates the given information with the
-// given location, ignoring any trailing slash.
-// This method is OK to call concurrently with sThirdPartyInfo.
-func (s *ThirdPartyStore) AddInfo(loc string, info ThirdPartyInfo) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.m[canonicalLocation(loc)] = info
-}
-
-func canonicalLocation(loc string) string {
- return strings.TrimSuffix(loc, "/")
-}
-
-// ThirdPartyInfo implements the ThirdPartyLocator interface.
-func (s *ThirdPartyStore) ThirdPartyInfo(ctx context.Context, loc string) (ThirdPartyInfo, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- if info, ok := s.m[canonicalLocation(loc)]; ok {
- return info, nil
- }
- return ThirdPartyInfo{}, ErrNotFound
-}
-
-// KeyPair holds a public/private pair of keys.
-type KeyPair struct {
- Public PublicKey `json:"public"`
- Private PrivateKey `json:"private"`
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (k *KeyPair) UnmarshalJSON(data []byte) error {
- type keyPair KeyPair
- if err := json.Unmarshal(data, (*keyPair)(k)); err != nil {
- return err
- }
- return k.validate()
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (k *KeyPair) UnmarshalYAML(unmarshal func(interface{}) error) error {
- type keyPair KeyPair
- if err := unmarshal((*keyPair)(k)); err != nil {
- return err
- }
- return k.validate()
-}
-
-func (k *KeyPair) validate() error {
- if k.Public.isZero() {
- return errgo.Newf("missing public key")
- }
- if k.Private.isZero() {
- return errgo.Newf("missing private key")
- }
- return nil
-}
-
-// GenerateKey generates a new key pair.
-func GenerateKey() (*KeyPair, error) {
- var key KeyPair
- pub, priv, err := box.GenerateKey(rand.Reader)
- if err != nil {
- return nil, err
- }
- key.Public = PublicKey{*pub}
- key.Private = PrivateKey{*priv}
- return &key, nil
-}
-
-// MustGenerateKey is like GenerateKey but panics if GenerateKey returns
-// an error - useful in tests.
-func MustGenerateKey() *KeyPair {
- key, err := GenerateKey()
- if err != nil {
- panic(errgo.Notef(err, "cannot generate key"))
- }
- return key
-}
-
-// String implements the fmt.Stringer interface
-// by returning the base64 representation of the
-// public key part of key.
-func (key *KeyPair) String() string {
- return key.Public.String()
-}
-
-type emptyLocator struct{}
-
-func (emptyLocator) ThirdPartyInfo(context.Context, string) (ThirdPartyInfo, error) {
- return ThirdPartyInfo{}, ErrNotFound
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go
deleted file mode 100644
index acb5a1f5..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/logger.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package bakery
-
-import (
- "context"
-)
-
-// Logger is used by the bakery to log informational messages
-// about bakery operations.
-type Logger interface {
- Infof(ctx context.Context, f string, args ...interface{})
- Debugf(ctx context.Context, f string, args ...interface{})
-}
-
-// DefaultLogger returns a Logger instance that does nothing.
-//
-// Deprecated: DefaultLogger exists for historical compatibility
-// only. Previously it logged using github.com/juju/loggo.
-func DefaultLogger(name string) Logger {
- return nopLogger{}
-}
-
-type nopLogger struct{}
-
-// Debugf implements Logger.Debugf.
-func (nopLogger) Debugf(context.Context, string, ...interface{}) {}
-
-// Debugf implements Logger.Infof.
-func (nopLogger) Infof(context.Context, string, ...interface{}) {}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go
deleted file mode 100644
index d5ad3b64..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/macaroon.go
+++ /dev/null
@@ -1,356 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "encoding/binary"
- "encoding/json"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// legacyNamespace holds the standard namespace as used by
-// pre-version3 macaroons.
-func legacyNamespace() *checkers.Namespace {
- ns := checkers.NewNamespace(nil)
- ns.Register(checkers.StdNamespace, "")
- return ns
-}
-
-// Macaroon represents an undischarged macaroon along with its first
-// party caveat namespace and associated third party caveat information
-// which should be passed to the third party when discharging a caveat.
-type Macaroon struct {
- // m holds the underlying macaroon.
- m *macaroon.Macaroon
-
- // version holds the version of the macaroon.
- version Version
-
- // caveatData maps from a third party caveat id to its
- // associated information, usually public-key encrypted with the
- // third party's public key.
- //
- // If version is less than Version3, this will always be nil,
- // because clients prior to that version do not support
- // macaroon-external caveat ids.
- caveatData map[string][]byte
-
- // namespace holds the first-party caveat namespace of the macaroon.
- namespace *checkers.Namespace
-
- // caveatIdPrefix holds the prefix to use for the ids of any third
- // party caveats created. This can be set when Discharge creates a
- // discharge macaroon.
- caveatIdPrefix []byte
-}
-
-// NewLegacyMacaroon returns a new macaroon holding m.
-// This should only be used when there's no alternative
-// (for example when m has been unmarshaled
-// from some alternative format).
-func NewLegacyMacaroon(m *macaroon.Macaroon) (*Macaroon, error) {
- v, err := bakeryVersion(m.Version())
- if err != nil {
- return nil, errgo.Mask(err)
- }
- return &Macaroon{
- m: m,
- version: v,
- namespace: legacyNamespace(),
- }, nil
-}
-
-type macaroonJSON struct {
- Macaroon *macaroon.Macaroon `json:"m"`
- Version Version `json:"v"`
- // Note: CaveatData is encoded using URL-base64-encoded keys
- // because JSON cannot deal with arbitrary byte sequences
- // in its strings, and URL-base64 values to match the
- // standard macaroon encoding.
- CaveatData map[string]string `json:"cdata,omitempty"`
- Namespace *checkers.Namespace `json:"ns"`
-}
-
-// Clone returns a copy of the macaroon. Note that the the new
-// macaroon's namespace still points to the same underlying Namespace -
-// copying the macaroon does not make a copy of the namespace.
-func (m *Macaroon) Clone() *Macaroon {
- m1 := *m
- m1.m = m1.m.Clone()
- m1.caveatData = make(map[string][]byte)
- for id, data := range m.caveatData {
- m1.caveatData[id] = data
- }
- return &m1
-}
-
-// MarshalJSON implements json.Marshaler by marshaling
-// the macaroon into the original macaroon format if the
-// version is earlier than Version3.
-func (m *Macaroon) MarshalJSON() ([]byte, error) {
- if m.version < Version3 {
- if len(m.caveatData) > 0 {
- return nil, errgo.Newf("cannot marshal pre-version3 macaroon with external caveat data")
- }
- return m.m.MarshalJSON()
- }
- caveatData := make(map[string]string)
- for id, data := range m.caveatData {
- caveatData[base64.RawURLEncoding.EncodeToString([]byte(id))] = base64.RawURLEncoding.EncodeToString(data)
- }
- return json.Marshal(macaroonJSON{
- Macaroon: m.m,
- Version: m.version,
- CaveatData: caveatData,
- Namespace: m.namespace,
- })
-}
-
-// UnmarshalJSON implements json.Unmarshaler by unmarshaling in a
-// backwardly compatible way - if provided with a previous macaroon
-// version, it will unmarshal that too.
-func (m *Macaroon) UnmarshalJSON(data []byte) error {
- // First try with new data format.
- var m1 macaroonJSON
- if err := json.Unmarshal(data, &m1); err != nil {
- // If we get an unmarshal error, we won't be able
- // to unmarshal into the old format either, as extra fields
- // are ignored.
- return errgo.Mask(err)
- }
- if m1.Macaroon == nil {
- return m.unmarshalJSONOldFormat(data)
- }
- // We've got macaroon field - it's the new format.
- if m1.Version < Version3 || m1.Version > LatestVersion {
- return errgo.Newf("unexpected bakery macaroon version; got %d want %d", m1.Version, Version3)
- }
- if got, want := m1.Macaroon.Version(), MacaroonVersion(m1.Version); got != want {
- return errgo.Newf("underlying macaroon has inconsistent version; got %d want %d", got, want)
- }
- caveatData := make(map[string][]byte)
- for id64, data64 := range m1.CaveatData {
- id, err := macaroon.Base64Decode([]byte(id64))
- if err != nil {
- return errgo.Notef(err, "cannot decode caveat id")
- }
- data, err := macaroon.Base64Decode([]byte(data64))
- if err != nil {
- return errgo.Notef(err, "cannot decode caveat")
- }
- caveatData[string(id)] = data
- }
- m.caveatData = caveatData
- m.m = m1.Macaroon
- m.namespace = m1.Namespace
- // TODO should we allow version > LatestVersion here?
- m.version = m1.Version
- return nil
-}
-
-// unmarshalJSONOldFormat unmarshals the data from an old format
-// macaroon (without any external caveats or namespace).
-func (m *Macaroon) unmarshalJSONOldFormat(data []byte) error {
- // Try to unmarshal from the original format.
- var m1 *macaroon.Macaroon
- if err := json.Unmarshal(data, &m1); err != nil {
- return errgo.Mask(err)
- }
- m2, err := NewLegacyMacaroon(m1)
- if err != nil {
- return errgo.Mask(err)
- }
- *m = *m2
- return nil
-}
-
-// bakeryVersion returns a bakery version that corresponds to
-// the macaroon version v. It is necessarily approximate because
-// several bakery versions can correspond to a single macaroon
-// version, so it's only of use when decoding legacy formats
-// (in Macaroon.UnmarshalJSON).
-//
-// It will return an error if it doesn't recognize the version.
-func bakeryVersion(v macaroon.Version) (Version, error) {
- switch v {
- case macaroon.V1:
- // Use version 1 because we don't know of any existing
- // version 0 clients.
- return Version1, nil
- case macaroon.V2:
- // Note that this could also correspond to Version3, but
- // this logic is explicitly for legacy versions.
- return Version2, nil
- default:
- return 0, errgo.Newf("unknown macaroon version when legacy-unmarshaling bakery macaroon; got %d", v)
- }
-}
-
-// NewMacaroon creates and returns a new macaroon with the given root
-// key, id and location. If the version is more than the latest known
-// version, the latest known version will be used. The namespace is that
-// of the service creating it.
-func NewMacaroon(rootKey, id []byte, location string, version Version, ns *checkers.Namespace) (*Macaroon, error) {
- if version > LatestVersion {
- version = LatestVersion
- }
- m, err := macaroon.New(rootKey, id, location, MacaroonVersion(version))
- if err != nil {
- return nil, errgo.Notef(err, "cannot create macaroon")
- }
- return &Macaroon{
- m: m,
- version: version,
- namespace: ns,
- }, nil
-}
-
-// M returns the underlying macaroon held within m.
-func (m *Macaroon) M() *macaroon.Macaroon {
- return m.m
-}
-
-// Version returns the bakery version of the first party
-// that created the macaroon.
-func (m *Macaroon) Version() Version {
- return m.version
-}
-
-// Namespace returns the first party caveat namespace of the macaroon.
-func (m *Macaroon) Namespace() *checkers.Namespace {
- return m.namespace
-}
-
-// AddCaveats is a convenienced method that calls m.AddCaveat for each
-// caveat in cavs.
-func (m *Macaroon) AddCaveats(ctx context.Context, cavs []checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error {
- for _, cav := range cavs {
- if err := m.AddCaveat(ctx, cav, key, loc); err != nil {
- return errgo.Notef(err, "cannot add caveat %#v", cav)
- }
- }
- return nil
-}
-
-// AddCaveat adds a caveat to the given macaroon.
-//
-// If it's a third-party caveat, it encrypts it using the given key pair
-// and by looking up the location using the given locator. If it's a
-// first party cavat, key and loc are unused.
-//
-// As a special case, if the caveat's Location field has the prefix
-// "local " the caveat is added as a client self-discharge caveat using
-// the public key base64-encoded in the rest of the location. In this
-// case, the Condition field must be empty. The resulting third-party
-// caveat will encode the condition "true" encrypted with that public
-// key. See LocalThirdPartyCaveat for a way of creating such caveats.
-func (m *Macaroon) AddCaveat(ctx context.Context, cav checkers.Caveat, key *KeyPair, loc ThirdPartyLocator) error {
- if cav.Location == "" {
- if err := m.m.AddFirstPartyCaveat([]byte(m.namespace.ResolveCaveat(cav).Condition)); err != nil {
- return errgo.Mask(err)
- }
- return nil
- }
- if key == nil {
- return errgo.Newf("no private key to encrypt third party caveat")
- }
- var info ThirdPartyInfo
- if localInfo, ok := parseLocalLocation(cav.Location); ok {
- info = localInfo
- cav.Location = "local"
- if cav.Condition != "" {
- return errgo.New("cannot specify caveat condition in local third-party caveat")
- }
- cav.Condition = "true"
- } else {
- if loc == nil {
- return errgo.Newf("no locator when adding third party caveat")
- }
- var err error
- info, err = loc.ThirdPartyInfo(ctx, cav.Location)
- if err != nil {
- return errgo.Notef(err, "cannot find public key for location %q", cav.Location)
- }
- }
- rootKey, err := randomBytes(24)
- if err != nil {
- return errgo.Notef(err, "cannot generate third party secret")
- }
- // Use the least supported version to encode the caveat.
- if m.version < info.Version {
- info.Version = m.version
- }
- caveatInfo, err := encodeCaveat(cav.Condition, rootKey, info, key, m.namespace)
- if err != nil {
- return errgo.Notef(err, "cannot create third party caveat at %q", cav.Location)
- }
- var id []byte
- if info.Version < Version3 {
- // We're encoding for an earlier client or third party which does
- // not understand bundled caveat info, so use the encoded
- // caveat information as the caveat id.
- id = caveatInfo
- } else {
- id = m.newCaveatId(m.caveatIdPrefix)
- if m.caveatData == nil {
- m.caveatData = make(map[string][]byte)
- }
- m.caveatData[string(id)] = caveatInfo
- }
- if err := m.m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil {
- return errgo.Notef(err, "cannot add third party caveat")
- }
- return nil
-}
-
-// newCaveatId returns a third party caveat id that
-// does not duplicate any third party caveat ids already inside m.
-//
-// If base is non-empty, it is used as the id prefix.
-func (m *Macaroon) newCaveatId(base []byte) []byte {
- var id []byte
- if len(base) > 0 {
- id = make([]byte, len(base), len(base)+binary.MaxVarintLen64)
- copy(id, base)
- } else {
- id = make([]byte, 0, 1+binary.MaxVarintLen32)
- // Add a version byte to the caveat id. Technically
- // this is unnecessary as the caveat-decoding logic
- // that looks at versions should never see this id,
- // but if the caveat payload isn't provided with the
- // payload, having this version gives a strong indication
- // that the payload has been omitted so we can produce
- // a better error for the user.
- id = append(id, byte(Version3))
- }
-
- // Iterate through integers looking for one that isn't already used,
- // starting from n so that if everyone is using this same algorithm,
- // we'll only perform one iteration.
- //
- // Note that although this looks like an infinite loop,
- // there's no way that it can run for more iterations
- // than the total number of existing third party caveats,
- // whatever their ids.
- caveats := m.m.Caveats()
-again:
- for i := len(m.caveatData); ; i++ {
- // We append a varint to the end of the id and assume that
- // any client that's created the id that we're using as a base
- // is using similar conventions - in the worst case they might
- // end up with a duplicate third party caveat id and thus create
- // a macaroon that cannot be discharged.
- id1 := appendUvarint(id, uint64(i))
- for _, cav := range caveats {
- if cav.VerificationId != nil && bytes.Equal(cav.Id, id1) {
- continue again
- }
- }
- return id1
- }
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go
deleted file mode 100644
index 83ce8908..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/oven.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package bakery
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "sort"
-
- "github.com/go-macaroon-bakery/macaroonpb"
- "github.com/rogpeppe/fastuuid"
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// MacaroonVerifier verifies macaroons and returns the operations and
-// caveats they're associated with.
-type MacaroonVerifier interface {
- // VerifyMacaroon verifies the signature of the given macaroon and returns
- // information on its associated operations, and all the first party
- // caveat conditions that need to be checked.
- //
- // This method should not check first party caveats itself.
- //
- // It should return a *VerificationError if the error occurred
- // because the macaroon signature failed or the root key
- // was not found - any other error will be treated as fatal
- // by Checker and cause authorization to terminate.
- VerifyMacaroon(ctx context.Context, ms macaroon.Slice) ([]Op, []string, error)
-}
-
-var uuidGen = fastuuid.MustNewGenerator()
-
-// Oven bakes macaroons. They emerge sweet and delicious
-// and ready for use in a Checker.
-//
-// All macaroons are associated with one or more operations (see
-// the Op type) which define the capabilities of the macaroon.
-//
-// There is one special operation, "login" (defined by LoginOp)
-// which grants the capability to speak for a particular user.
-// The login capability will never be mixed with other capabilities.
-//
-// It is up to the caller to decide on semantics for other operations.
-type Oven struct {
- p OvenParams
-}
-
-type OvenParams struct {
- // Namespace holds the namespace to use when adding first party caveats.
- // If this is nil, checkers.New(nil).Namespace will be used.
- Namespace *checkers.Namespace
-
- // RootKeyStoreForEntity returns the macaroon storage to be
- // used for root keys associated with macaroons created
- // wth NewMacaroon.
- //
- // If this is nil, NewMemRootKeyStore will be used to create
- // a new store to be used for all entities.
- RootKeyStoreForOps func(ops []Op) RootKeyStore
-
- // Key holds the private key pair used to encrypt third party caveats.
- // If it is nil, no third party caveats can be created.
- Key *KeyPair
-
- // Location holds the location that will be associated with new macaroons
- // (as returned by Macaroon.Location).
- Location string
-
- // Locator is used to find out information on third parties when
- // adding third party caveats. If this is nil, no non-local third
- // party caveats can be added.
- Locator ThirdPartyLocator
-
- // LegacyMacaroonOp holds the operation to associate with old
- // macaroons that don't have associated operations.
- // If this is empty, legacy macaroons will not be associated
- // with any operations.
- LegacyMacaroonOp Op
-
- // TODO max macaroon or macaroon id size?
-}
-
-// NewOven returns a new oven using the given parameters.
-func NewOven(p OvenParams) *Oven {
- if p.Locator == nil {
- p.Locator = emptyLocator{}
- }
- if p.RootKeyStoreForOps == nil {
- store := NewMemRootKeyStore()
- p.RootKeyStoreForOps = func(ops []Op) RootKeyStore {
- return store
- }
- }
- if p.Namespace == nil {
- p.Namespace = checkers.New(nil).Namespace()
- }
- return &Oven{
- p: p,
- }
-}
-
-// VerifyMacaroon implements MacaroonVerifier.VerifyMacaroon, making Oven
-// an instance of MacaroonVerifier.
-//
-// For macaroons minted with previous bakery versions, it always
-// returns a single LoginOp operation.
-func (o *Oven) VerifyMacaroon(ctx context.Context, ms macaroon.Slice) (ops []Op, conditions []string, err error) {
- if len(ms) == 0 {
- return nil, nil, errgo.Newf("no macaroons in slice")
- }
- storageId, ops, err := o.decodeMacaroonId(ms[0].Id())
- if err != nil {
- return nil, nil, errgo.Mask(err)
- }
- rootKey, err := o.p.RootKeyStoreForOps(ops).Get(ctx, storageId)
- if err != nil {
- if errgo.Cause(err) != ErrNotFound {
- return nil, nil, errgo.Notef(err, "cannot get macaroon")
- }
- // If the macaroon was not found, it is probably
- // because it's been removed after time-expiry,
- // so return a verification error.
- return nil, nil, &VerificationError{
- Reason: errgo.Newf("macaroon not found in storage"),
- }
- }
- conditions, err = ms[0].VerifySignature(rootKey, ms[1:])
- if err != nil {
- return nil, nil, &VerificationError{
- Reason: errgo.Mask(err),
- }
- }
- return ops, conditions, nil
-}
-
-func (o *Oven) decodeMacaroonId(id []byte) (storageId []byte, ops []Op, err error) {
- base64Decoded := false
- if id[0] == 'A' {
- // The first byte is not a version number and it's 'A', which is the
- // base64 encoding of the top 6 bits (all zero) of the version number 2 or 3,
- // so we assume that it's the base64 encoding of a new-style
- // macaroon id, so we base64 decode it.
- //
- // Note that old-style ids always start with an ASCII character >= 4
- // (> 32 in fact) so this logic won't be triggered for those.
- dec := make([]byte, base64.RawURLEncoding.DecodedLen(len(id)))
- n, err := base64.RawURLEncoding.Decode(dec, id)
- if err == nil {
- // Set the id only on success - if it's a bad encoding, we'll get a not-found error
- // which is fine because "not found" is a correct description of the issue - we
- // can't find the root key for the given id.
- id = dec[0:n]
- base64Decoded = true
- }
- }
- // Trim any extraneous information from the id before retrieving
- // it from storage, including the UUID that's added when
- // creating macaroons to make all macaroons unique even if
- // they're using the same root key.
- switch id[0] {
- case byte(Version2):
- // Skip the UUID at the start of the id.
- storageId = id[1+16:]
- case byte(Version3):
- var id1 macaroonpb.MacaroonId
- if err := id1.UnmarshalBinary(id[1:]); err != nil {
- return nil, nil, errgo.Notef(err, "cannot unmarshal macaroon id")
- }
- if len(id1.Ops) == 0 || len(id1.Ops[0].Actions) == 0 {
- return nil, nil, errgo.Newf("no operations found in macaroon")
- }
- ops = make([]Op, 0, len(id1.Ops))
- for _, op := range id1.Ops {
- for _, action := range op.Actions {
- ops = append(ops, Op{
- Entity: op.Entity,
- Action: action,
- })
- }
- }
- return id1.StorageId, ops, nil
- }
- if !base64Decoded && isLowerCaseHexChar(id[0]) {
- // It's an old-style id, probably with a hyphenated UUID.
- // so trim that off.
- if i := bytes.LastIndexByte(id, '-'); i >= 0 {
- storageId = id[0:i]
- }
- }
- if op := o.p.LegacyMacaroonOp; op != (Op{}) {
- ops = []Op{op}
- }
- return storageId, ops, nil
-}
-
-// NewMacaroon takes a macaroon with the given version from the oven, associates it with the given operations
-// and attaches the given caveats. There must be at least one operation specified.
-func (o *Oven) NewMacaroon(ctx context.Context, version Version, caveats []checkers.Caveat, ops ...Op) (*Macaroon, error) {
- if len(ops) == 0 {
- return nil, errgo.Newf("cannot mint a macaroon associated with no operations")
- }
- ops = CanonicalOps(ops)
- rootKey, storageId, err := o.p.RootKeyStoreForOps(ops).RootKey(ctx)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- id, err := o.newMacaroonId(ctx, ops, storageId)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- idBytesNoVersion, err := id.MarshalBinary()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- idBytes := make([]byte, len(idBytesNoVersion)+1)
- idBytes[0] = byte(LatestVersion)
- // TODO We could use a proto.Buffer to avoid this copy.
- copy(idBytes[1:], idBytesNoVersion)
-
- if MacaroonVersion(version) < macaroon.V2 {
- // The old macaroon format required valid text for the macaroon id,
- // so base64-encode it.
- b64data := make([]byte, base64.RawURLEncoding.EncodedLen(len(idBytes)))
- base64.RawURLEncoding.Encode(b64data, idBytes)
- idBytes = b64data
- }
- m, err := NewMacaroon(rootKey, idBytes, o.p.Location, version, o.p.Namespace)
- if err != nil {
- return nil, errgo.Notef(err, "cannot create macaroon with version %v", version)
- }
- if err := o.AddCaveats(ctx, m, caveats); err != nil {
- return nil, errgo.Mask(err)
- }
- return m, nil
-}
-
-// AddCaveat adds a caveat to the given macaroon.
-func (o *Oven) AddCaveat(ctx context.Context, m *Macaroon, cav checkers.Caveat) error {
- return m.AddCaveat(ctx, cav, o.p.Key, o.p.Locator)
-}
-
-// AddCaveats adds all the caveats to the given macaroon.
-func (o *Oven) AddCaveats(ctx context.Context, m *Macaroon, caveats []checkers.Caveat) error {
- return m.AddCaveats(ctx, caveats, o.p.Key, o.p.Locator)
-}
-
-// Key returns the oven's private/public key par.
-func (o *Oven) Key() *KeyPair {
- return o.p.Key
-}
-
-// Locator returns the third party locator that the
-// oven was created with.
-func (o *Oven) Locator() ThirdPartyLocator {
- return o.p.Locator
-}
-
-// CanonicalOps returns the given operations slice sorted
-// with duplicates removed.
-func CanonicalOps(ops []Op) []Op {
- canonOps := opsByValue(ops)
- needNewSlice := false
- for i := 1; i < len(ops); i++ {
- if !canonOps.Less(i-1, i) {
- needNewSlice = true
- break
- }
- }
- if !needNewSlice {
- return ops
- }
- canonOps = make([]Op, len(ops))
- copy(canonOps, ops)
- sort.Sort(canonOps)
-
- // Note we know that there's at least one operation here
- // because we'd have returned earlier if the slice was empty.
- j := 0
- for _, op := range canonOps[1:] {
- if op != canonOps[j] {
- j++
- canonOps[j] = op
- }
- }
- return canonOps[0 : j+1]
-}
-
-func (o *Oven) newMacaroonId(ctx context.Context, ops []Op, storageId []byte) (*macaroonpb.MacaroonId, error) {
- uuid := uuidGen.Next()
- nonce := uuid[0:16]
- return &macaroonpb.MacaroonId{
- Nonce: nonce,
- StorageId: storageId,
- Ops: macaroonIdOps(ops),
- }, nil
-}
-
-// macaroonIdOps returns operations suitable for serializing
-// as part of an *macaroonpb.MacaroonId. It assumes that
-// ops has been canonicalized and that there's at least
-// one operation.
-func macaroonIdOps(ops []Op) []*macaroonpb.Op {
- idOps := make([]macaroonpb.Op, 0, len(ops))
- idOps = append(idOps, macaroonpb.Op{
- Entity: ops[0].Entity,
- Actions: []string{ops[0].Action},
- })
- i := 0
- idOp := &idOps[0]
- for _, op := range ops[1:] {
- if op.Entity != idOp.Entity {
- idOps = append(idOps, macaroonpb.Op{
- Entity: op.Entity,
- Actions: []string{op.Action},
- })
- i++
- idOp = &idOps[i]
- continue
- }
- if op.Action != idOp.Actions[len(idOp.Actions)-1] {
- idOp.Actions = append(idOp.Actions, op.Action)
- }
- }
- idOpPtrs := make([]*macaroonpb.Op, len(idOps))
- for i := range idOps {
- idOpPtrs[i] = &idOps[i]
- }
- return idOpPtrs
-}
-
-type opsByValue []Op
-
-func (o opsByValue) Less(i, j int) bool {
- o0, o1 := o[i], o[j]
- if o0.Entity != o1.Entity {
- return o0.Entity < o1.Entity
- }
- return o0.Action < o1.Action
-}
-
-func (o opsByValue) Swap(i, j int) {
- o[i], o[j] = o[j], o[i]
-}
-
-func (o opsByValue) Len() int {
- return len(o)
-}
-
-func isLowerCaseHexChar(c byte) bool {
- switch {
- case '0' <= c && c <= '9':
- return true
- case 'a' <= c && c <= 'f':
- return true
- }
- return false
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go
deleted file mode 100644
index 20c5fcc7..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/slice.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package bakery
-
-import (
- "context"
- "fmt"
- "time"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Slice holds a slice of unbound macaroons.
-type Slice []*Macaroon
-
-// Bind prepares the macaroon slice for use in a request. This must be
-// done before presenting the macaroons to a service for use as
-// authorization tokens. The result will only be valid
-// if s contains discharge macaroons for all third party
-// caveats.
-//
-// All the macaroons in the returned slice will be copies
-// of this in s, not references.
-func (s Slice) Bind() macaroon.Slice {
- if len(s) == 0 {
- return nil
- }
- ms := make(macaroon.Slice, len(s))
- ms[0] = s[0].M().Clone()
- rootSig := ms[0].Signature()
- for i, m := range s[1:] {
- m1 := m.M().Clone()
- m1.Bind(rootSig)
- ms[i+1] = m1
- }
- return ms
-}
-
-// Purge returns a new slice holding all macaroons in s
-// that expire after the given time.
-func (ms Slice) Purge(t time.Time) Slice {
- ms1 := make(Slice, 0, len(ms))
- for i, m := range ms {
- et, ok := checkers.ExpiryTime(m.Namespace(), m.M().Caveats())
- if !ok || et.After(t) {
- ms1 = append(ms1, m)
- } else if i == 0 {
- // The primary macaroon has expired, so all its discharges
- // have expired too.
- // TODO purge all discharge macaroons when the macaroon
- // containing their third-party caveat expires.
- return nil
- }
- }
- return ms1
-}
-
-// DischargeAll discharges all the third party caveats in the slice for
-// which discharge macaroons are not already present, using getDischarge
-// to acquire the discharge macaroons. It always returns the slice with
-// any acquired discharge macaroons added, even on error. It returns an
-// error if all the discharges could not be acquired.
-//
-// Note that this differs from DischargeAll in that it can be given several existing
-// discharges, and that the resulting discharges are not bound to the primary,
-// so it's still possible to add caveats and reacquire expired discharges
-// without reacquiring the primary macaroon.
-func (ms Slice) DischargeAll(ctx context.Context, getDischarge func(ctx context.Context, cav macaroon.Caveat, encryptedCaveat []byte) (*Macaroon, error), localKey *KeyPair) (Slice, error) {
- if len(ms) == 0 {
- return nil, errgo.Newf("no macaroons to discharge")
- }
- ms1 := make(Slice, len(ms))
- copy(ms1, ms)
- // have holds the keys of all the macaroon ids in the slice.
- type needCaveat struct {
- // cav holds the caveat that needs discharge.
- cav macaroon.Caveat
- // encryptedCaveat holds encrypted caveat
- // if it was held externally.
- encryptedCaveat []byte
- }
- var need []needCaveat
- have := make(map[string]bool)
- for _, m := range ms[1:] {
- have[string(m.M().Id())] = true
- }
- // addCaveats adds any required third party caveats to the need slice
- // that aren't already present .
- addCaveats := func(m *Macaroon) {
- for _, cav := range m.M().Caveats() {
- if len(cav.VerificationId) == 0 || have[string(cav.Id)] {
- continue
- }
- need = append(need, needCaveat{
- cav: cav,
- encryptedCaveat: m.caveatData[string(cav.Id)],
- })
- }
- }
- for _, m := range ms {
- addCaveats(m)
- }
- var errs []error
- for len(need) > 0 {
- cav := need[0]
- need = need[1:]
- var dm *Macaroon
- var err error
- if localKey != nil && cav.cav.Location == "local" {
- // TODO use a small caveat id.
- dm, err = Discharge(ctx, DischargeParams{
- Key: localKey,
- Checker: localDischargeChecker,
- Caveat: cav.encryptedCaveat,
- Id: cav.cav.Id,
- Locator: emptyLocator{},
- })
- } else {
- dm, err = getDischarge(ctx, cav.cav, cav.encryptedCaveat)
- }
- if err != nil {
- errs = append(errs, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.cav.Location), errgo.Any))
- continue
- }
- ms1 = append(ms1, dm)
- addCaveats(dm)
- }
- if errs != nil {
- // TODO log other errors? Return them all?
- return ms1, errgo.Mask(errs[0], errgo.Any)
- }
- return ms1, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go
deleted file mode 100644
index b8b19408..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/store.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package bakery
-
-import (
- "context"
- "sync"
-)
-
-// RootKeyStore defines store for macaroon root keys.
-type RootKeyStore interface {
- // Get returns the root key for the given id.
- // If the item is not there, it returns ErrNotFound.
- Get(ctx context.Context, id []byte) ([]byte, error)
-
- // RootKey returns the root key to be used for making a new
- // macaroon, and an id that can be used to look it up later with
- // the Get method.
- //
- // Note that the root keys should remain available for as long
- // as the macaroons using them are valid.
- //
- // Note that there is no need for it to return a new root key
- // for every call - keys may be reused, although some key
- // cycling is over time is advisable.
- RootKey(ctx context.Context) (rootKey []byte, id []byte, err error)
-}
-
-// NewMemRootKeyStore returns an implementation of
-// Store that generates a single key and always
-// returns that from RootKey. The same id ("0") is always
-// used.
-func NewMemRootKeyStore() RootKeyStore {
- return new(memRootKeyStore)
-}
-
-type memRootKeyStore struct {
- mu sync.Mutex
- key []byte
-}
-
-// Get implements Store.Get.
-func (s *memRootKeyStore) Get(_ context.Context, id []byte) ([]byte, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if len(id) != 1 || id[0] != '0' || s.key == nil {
- return nil, ErrNotFound
- }
- return s.key, nil
-}
-
-// RootKey implements Store.RootKey by always returning the same root
-// key.
-func (s *memRootKeyStore) RootKey(context.Context) (rootKey, id []byte, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.key == nil {
- newKey, err := randomBytes(24)
- if err != nil {
- return nil, nil, err
- }
- s.key = newKey
- }
- return s.key, []byte("0"), nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go
deleted file mode 100644
index 9f8e87bb..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/version.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package bakery
-
-import "gopkg.in/macaroon.v2"
-
-// Version represents a version of the bakery protocol.
-type Version int
-
-const (
- // In version 0, discharge-required errors use status 407
- Version0 Version = 0
- // In version 1, discharge-required errors use status 401.
- Version1 Version = 1
- // In version 2, binary macaroons and caveat ids are supported.
- Version2 Version = 2
- // In version 3, we support operations associated with macaroons
- // and external third party caveats.
- Version3 Version = 3
- LatestVersion = Version3
-)
-
-// MacaroonVersion returns the macaroon version that should
-// be used with the given bakery Version.
-func MacaroonVersion(v Version) macaroon.Version {
- switch v {
- case Version0, Version1:
- return macaroon.V1
- default:
- return macaroon.V2
- }
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go
deleted file mode 100644
index 8cc2e2a3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/browser.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "os"
-
- "github.com/juju/webbrowser"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-const WebBrowserInteractionKind = "browser-window"
-
-// WaitTokenResponse holds the response type
-// returned, JSON-encoded, from the waitToken
-// URL passed to SetBrowserInteraction.
-type WaitTokenResponse struct {
- Kind string `json:"kind"`
- // Token holds the token value when it's well-formed utf-8
- Token string `json:"token,omitempty"`
- // Token64 holds the token value, base64 encoded, when it's
- // not well-formed utf-8.
- Token64 string `json:"token64,omitempty"`
-}
-
-// WaitResponse holds the type that should be returned
-// by an HTTP response made to a LegacyWaitURL
-// (See the ErrorInfo type).
-type WaitResponse struct {
- Macaroon *bakery.Macaroon
-}
-
-// WebBrowserInteractionInfo holds the information
-// expected in the browser-window interaction
-// entry in an interaction-required error.
-type WebBrowserInteractionInfo struct {
- // VisitURL holds the URL to be visited in a web browser.
- VisitURL string
-
- // WaitTokenURL holds a URL that will block on GET
- // until the browser interaction has completed.
- // On success, the response is expected to hold a waitTokenResponse
- // in its body holding the token to be returned from the
- // Interact method.
- WaitTokenURL string
-}
-
-var (
- _ Interactor = WebBrowserInteractor{}
- _ LegacyInteractor = WebBrowserInteractor{}
-)
-
-// OpenWebBrowser opens a web browser at the
-// given URL. If the OS is not recognised, the URL
-// is just printed to standard output.
-func OpenWebBrowser(url *url.URL) error {
- err := webbrowser.Open(url)
- if err == nil {
- fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n")
- fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url)
- return nil
- }
- if err == webbrowser.ErrNoBrowser {
- fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url)
- return nil
- }
- return err
-}
-
-// SetWebBrowserInteraction adds information about web-browser-based
-// interaction to the given error, which should be an
-// interaction-required error that's about to be returned from a
-// discharge request.
-//
-// The visitURL parameter holds a URL that should be visited by the user
-// in a web browser; the waitTokenURL parameter holds a URL that can be
-// long-polled to acquire the resulting discharge token.
-//
-// Use SetLegacyInteraction to add support for legacy clients
-// that don't understand the newer InteractionMethods field.
-func SetWebBrowserInteraction(e *Error, visitURL, waitTokenURL string) {
- e.SetInteraction(WebBrowserInteractionKind, WebBrowserInteractionInfo{
- VisitURL: visitURL,
- WaitTokenURL: waitTokenURL,
- })
-}
-
-// SetLegacyInteraction adds information about web-browser-based
-// interaction (or other kinds of legacy-protocol interaction) to the
-// given error, which should be an interaction-required error that's
-// about to be returned from a discharge request.
-//
-// The visitURL parameter holds a URL that should be visited by the user
-// in a web browser (or with an "Accept: application/json" header to
-// find out the set of legacy interaction methods).
-//
-// The waitURL parameter holds a URL that can be long-polled
-// to acquire the discharge macaroon.
-func SetLegacyInteraction(e *Error, visitURL, waitURL string) {
- if e.Info == nil {
- e.Info = new(ErrorInfo)
- }
- e.Info.LegacyVisitURL = visitURL
- e.Info.LegacyWaitURL = waitURL
-}
-
-// WebBrowserInteractor handls web-browser-based
-// interaction-required errors by opening a web
-// browser to allow the user to prove their
-// credentials interactively.
-//
-// It implements the Interactor interface, so instances
-// can be used with Client.AddInteractor.
-type WebBrowserInteractor struct {
- // OpenWebBrowser is used to visit a page in
- // the user's web browser. If it's nil, the
- // OpenWebBrowser function will be used.
- OpenWebBrowser func(*url.URL) error
-}
-
-// Kind implements Interactor.Kind.
-func (WebBrowserInteractor) Kind() string {
- return WebBrowserInteractionKind
-}
-
-// Interact implements Interactor.Interact by opening a new web page.
-func (wi WebBrowserInteractor) Interact(ctx context.Context, client *Client, location string, irErr *Error) (*DischargeToken, error) {
- var p WebBrowserInteractionInfo
- if err := irErr.InteractionMethod(wi.Kind(), &p); err != nil {
- return nil, errgo.Mask(err, errgo.Is(ErrInteractionMethodNotFound))
- }
- visitURL, err := relativeURL(location, p.VisitURL)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative visit URL")
- }
- waitTokenURL, err := relativeURL(location, p.WaitTokenURL)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative wait URL")
- }
- if err := wi.openWebBrowser(visitURL); err != nil {
- return nil, errgo.Mask(err)
- }
- return waitForToken(ctx, client, waitTokenURL)
-}
-
-func (wi WebBrowserInteractor) openWebBrowser(u *url.URL) error {
- open := wi.OpenWebBrowser
- if open == nil {
- open = OpenWebBrowser
- }
- if err := open(u); err != nil {
- return errgo.Mask(err)
- }
- return nil
-}
-
-// waitForToken returns a token from a the waitToken URL
-func waitForToken(ctx context.Context, client *Client, waitTokenURL *url.URL) (*DischargeToken, error) {
- // TODO integrate this with waitForMacaroon somehow?
- req, err := http.NewRequest("GET", waitTokenURL.String(), nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- req = req.WithContext(ctx)
- httpResp, err := client.Client.Do(req)
- if err != nil {
- return nil, errgo.Notef(err, "cannot get %q", waitTokenURL)
- }
- defer httpResp.Body.Close()
- if httpResp.StatusCode != http.StatusOK {
- err := unmarshalError(httpResp)
- return nil, errgo.NoteMask(err, "cannot acquire discharge token", errgo.Any)
- }
- var resp WaitTokenResponse
- if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait response")
- }
- tokenVal, err := maybeBase64Decode(resp.Token, resp.Token64)
- if err != nil {
- return nil, errgo.Notef(err, "bad discharge token")
- }
- // TODO check that kind and value are non-empty?
- return &DischargeToken{
- Kind: resp.Kind,
- Value: tokenVal,
- }, nil
-}
-
-// LegacyInteract implements LegacyInteractor by opening a web browser page.
-func (wi WebBrowserInteractor) LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error {
- if err := wi.openWebBrowser(visitURL); err != nil {
- return errgo.Mask(err)
- }
- return nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go
deleted file mode 100644
index befc0e17..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/checkers.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net"
- "net/http"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-type httpRequestKey struct{}
-
-// ContextWithRequest returns the context with information from the
-// given request attached as context. This is used by the httpbakery
-// checkers (see RegisterCheckers for details).
-func ContextWithRequest(ctx context.Context, req *http.Request) context.Context {
- return context.WithValue(ctx, httpRequestKey{}, req)
-}
-
-func requestFromContext(ctx context.Context) *http.Request {
- req, _ := ctx.Value(httpRequestKey{}).(*http.Request)
- return req
-}
-
-const (
- // CondClientIPAddr holds the first party caveat condition
- // that checks a client's IP address.
- CondClientIPAddr = "client-ip-addr"
-
- // CondClientOrigin holds the first party caveat condition that
- // checks a client's origin header.
- CondClientOrigin = "origin"
-)
-
-// CheckersNamespace holds the URI of the HTTP checkers schema.
-const CheckersNamespace = "http"
-
-var allCheckers = map[string]checkers.Func{
- CondClientIPAddr: ipAddrCheck,
- CondClientOrigin: clientOriginCheck,
-}
-
-// RegisterCheckers registers all the HTTP checkers with the given checker.
-// Current checkers include:
-//
-// client-ip-addr
-//
-// The client-ip-addr caveat checks that the HTTP request has
-// the given remote IP address.
-//
-// origin
-//
-// The origin caveat checks that the HTTP Origin header has
-// the given value.
-func RegisterCheckers(c *checkers.Checker) {
- c.Namespace().Register(CheckersNamespace, "http")
- for cond, check := range allCheckers {
- c.Register(cond, CheckersNamespace, check)
- }
-}
-
-// NewChecker returns a new checker with the standard
-// and HTTP checkers registered in it.
-func NewChecker() *checkers.Checker {
- c := checkers.New(nil)
- RegisterCheckers(c)
- return c
-}
-
-// ipAddrCheck implements the IP client address checker
-// for an HTTP request.
-func ipAddrCheck(ctx context.Context, cond, args string) error {
- req := requestFromContext(ctx)
- if req == nil {
- return errgo.Newf("no IP address found in context")
- }
- ip := net.ParseIP(args)
- if ip == nil {
- return errgo.Newf("cannot parse IP address in caveat")
- }
- if req.RemoteAddr == "" {
- return errgo.Newf("client has no remote address")
- }
- reqIP, err := requestIPAddr(req)
- if err != nil {
- return errgo.Mask(err)
- }
- if !reqIP.Equal(ip) {
- return errgo.Newf("client IP address mismatch, got %s", reqIP)
- }
- return nil
-}
-
-// clientOriginCheck implements the Origin header checker
-// for an HTTP request.
-func clientOriginCheck(ctx context.Context, cond, args string) error {
- req := requestFromContext(ctx)
- if req == nil {
- return errgo.Newf("no origin found in context")
- }
- // Note that web browsers may not provide the origin header when it's
- // not a cross-site request with a GET method. There's nothing we
- // can do about that, so just allow all requests with an empty origin.
- if reqOrigin := req.Header.Get("Origin"); reqOrigin != "" && reqOrigin != args {
- return errgo.Newf("request has invalid Origin header; got %q", reqOrigin)
- }
- return nil
-}
-
-// SameClientIPAddrCaveat returns a caveat that will check that
-// the remote IP address is the same as that in the given HTTP request.
-func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat {
- if req.RemoteAddr == "" {
- return checkers.ErrorCaveatf("client has no remote IP address")
- }
- ip, err := requestIPAddr(req)
- if err != nil {
- return checkers.ErrorCaveatf("%v", err)
- }
- return ClientIPAddrCaveat(ip)
-}
-
-// ClientIPAddrCaveat returns a caveat that will check whether the
-// client's IP address is as provided.
-func ClientIPAddrCaveat(addr net.IP) checkers.Caveat {
- if len(addr) != net.IPv4len && len(addr) != net.IPv6len {
- return checkers.ErrorCaveatf("bad IP address %d", []byte(addr))
- }
- return httpCaveat(CondClientIPAddr, addr.String())
-}
-
-// ClientOriginCaveat returns a caveat that will check whether the
-// client's Origin header in its HTTP request is as provided.
-func ClientOriginCaveat(origin string) checkers.Caveat {
- return httpCaveat(CondClientOrigin, origin)
-}
-
-func httpCaveat(cond, arg string) checkers.Caveat {
- return checkers.Caveat{
- Condition: checkers.Condition(cond, arg),
- Namespace: CheckersNamespace,
- }
-}
-
-func requestIPAddr(req *http.Request) (net.IP, error) {
- reqHost, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- return nil, errgo.Newf("cannot parse host port in remote address: %v", err)
- }
- ip := net.ParseIP(reqHost)
- if ip == nil {
- return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr)
- }
- return ip, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go
deleted file mode 100644
index 212f57f0..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/client.go
+++ /dev/null
@@ -1,727 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/http"
- "net/http/cookiejar"
- "net/url"
- "strings"
- "time"
-
- "golang.org/x/net/publicsuffix"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-var unmarshalError = httprequest.ErrorUnmarshaler(&Error{})
-
-// maxDischargeRetries holds the maximum number of times that an HTTP
-// request will be retried after a third party caveat has been successfully
-// discharged.
-const maxDischargeRetries = 3
-
-// DischargeError represents the error when a third party discharge
-// is refused by a server.
-type DischargeError struct {
- // Reason holds the underlying remote error that caused the
- // discharge to fail.
- Reason *Error
-}
-
-func (e *DischargeError) Error() string {
- return fmt.Sprintf("third party refused discharge: %v", e.Reason)
-}
-
-// IsDischargeError reports whether err is a *DischargeError.
-func IsDischargeError(err error) bool {
- _, ok := err.(*DischargeError)
- return ok
-}
-
-// InteractionError wraps an error returned by a call to visitWebPage.
-type InteractionError struct {
- // Reason holds the actual error returned from visitWebPage.
- Reason error
-}
-
-func (e *InteractionError) Error() string {
- return fmt.Sprintf("cannot start interactive session: %v", e.Reason)
-}
-
-// IsInteractionError reports whether err is an *InteractionError.
-func IsInteractionError(err error) bool {
- _, ok := err.(*InteractionError)
- return ok
-}
-
-// NewHTTPClient returns an http.Client that ensures
-// that headers are sent to the server even when the
-// server redirects a GET request. The returned client
-// also contains an empty in-memory cookie jar.
-//
-// See https://github.com/golang/go/issues/4677
-func NewHTTPClient() *http.Client {
- c := *http.DefaultClient
- c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return fmt.Errorf("too many redirects")
- }
- if len(via) == 0 {
- return nil
- }
- for attr, val := range via[0].Header {
- if attr == "Cookie" {
- // Cookies are added automatically anyway.
- continue
- }
- if _, ok := req.Header[attr]; !ok {
- req.Header[attr] = val
- }
- }
- return nil
- }
- jar, err := cookiejar.New(&cookiejar.Options{
- PublicSuffixList: publicsuffix.List,
- })
- if err != nil {
- panic(err)
- }
- c.Jar = jar
- return &c
-}
-
-// Client holds the context for making HTTP requests
-// that automatically acquire and discharge macaroons.
-type Client struct {
- // Client holds the HTTP client to use. It should have a cookie
- // jar configured, and when redirecting it should preserve the
- // headers (see NewHTTPClient).
- *http.Client
-
- // InteractionMethods holds a slice of supported interaction
- // methods, with preferred methods earlier in the slice.
- // On receiving an interaction-required error when discharging,
- // the Kind method of each Interactor in turn will be called
- // and, if the error indicates that the interaction kind is supported,
- // the Interact method will be called to complete the discharge.
- InteractionMethods []Interactor
-
- // Key holds the client's key. If set, the client will try to
- // discharge third party caveats with the special location
- // "local" by using this key. See bakery.DischargeAllWithKey and
- // bakery.LocalThirdPartyCaveat for more information
- Key *bakery.KeyPair
-
- // Logger is used to log information about client activities.
- // If it is nil, bakery.DefaultLogger("httpbakery") will be used.
- Logger bakery.Logger
-}
-
-// An Interactor represents a way of persuading a discharger
-// that it should grant a discharge macaroon.
-type Interactor interface {
- // Kind returns the interaction method name. This corresponds to the
- // key in the Error.InteractionMethods type.
- Kind() string
-
- // Interact performs the interaction, and returns a token that can be
- // used to acquire the discharge macaroon. The location provides
- // the third party caveat location to make it possible to use
- // relative URLs.
- //
- // If the given interaction isn't supported by the client for
- // the given location, it may return an error with an
- // ErrInteractionMethodNotFound cause which will cause the
- // interactor to be ignored that time.
- Interact(ctx context.Context, client *Client, location string, interactionRequiredErr *Error) (*DischargeToken, error)
-}
-
-// DischargeToken holds a token that is intended
-// to persuade a discharger to discharge a third
-// party caveat.
-type DischargeToken struct {
- // Kind holds the kind of the token. By convention this
- // matches the name of the interaction method used to
- // obtain the token, but that's not required.
- Kind string `json:"kind"`
-
- // Value holds the value of the token.
- Value []byte `json:"value"`
-}
-
-// LegacyInteractor may optionally be implemented by Interactor
-// implementations that implement the legacy interaction-required
-// error protocols.
-type LegacyInteractor interface {
- // LegacyInteract implements the "visit" half of a legacy discharge
- // interaction. The "wait" half will be implemented by httpbakery.
- // The location is the location specified by the third party
- // caveat.
- LegacyInteract(ctx context.Context, client *Client, location string, visitURL *url.URL) error
-}
-
-// NewClient returns a new Client containing an HTTP client
-// created with NewHTTPClient and leaves all other fields zero.
-func NewClient() *Client {
- return &Client{
- Client: NewHTTPClient(),
- }
-}
-
-// AddInteractor is a convenience method that appends the given
-// interactor to c.InteractionMethods.
-// For example, to enable web-browser interaction on
-// a client c, do:
-//
-// c.AddInteractor(httpbakery.WebBrowserWindowInteractor)
-func (c *Client) AddInteractor(i Interactor) {
- c.InteractionMethods = append(c.InteractionMethods, i)
-}
-
-// DischargeAll attempts to acquire discharge macaroons for all the
-// third party caveats in m, and returns a slice containing all
-// of them bound to m.
-//
-// If the discharge fails because a third party refuses to discharge a
-// caveat, the returned error will have a cause of type *DischargeError.
-// If the discharge fails because visitWebPage returns an error,
-// the returned error will have a cause of *InteractionError.
-//
-// The returned macaroon slice will not be stored in the client
-// cookie jar (see SetCookie if you need to do that).
-func (c *Client) DischargeAll(ctx context.Context, m *bakery.Macaroon) (macaroon.Slice, error) {
- return bakery.DischargeAllWithKey(ctx, m, c.AcquireDischarge, c.Key)
-}
-
-// DischargeAllUnbound is like DischargeAll except that it does not
-// bind the resulting macaroons.
-func (c *Client) DischargeAllUnbound(ctx context.Context, ms bakery.Slice) (bakery.Slice, error) {
- return ms.DischargeAll(ctx, c.AcquireDischarge, c.Key)
-}
-
-// Do is like DoWithContext, except the context is automatically derived.
-// If using go version 1.7 or later the context will be taken from the
-// given request, otherwise context.Background() will be used.
-func (c *Client) Do(req *http.Request) (*http.Response, error) {
- return c.do(contextFromRequest(req), req, nil)
-}
-
-// DoWithContext sends the given HTTP request and returns its response.
-// If the request fails with a discharge-required error, any required
-// discharge macaroons will be acquired, and the request will be repeated
-// with those attached.
-//
-// If the required discharges were refused by a third party, an error
-// with a *DischargeError cause will be returned.
-//
-// If interaction is required by the user, the client's InteractionMethods
-// will be used to perform interaction. An error
-// with a *InteractionError cause will be returned if this interaction
-// fails. See WebBrowserWindowInteractor for a possible implementation of
-// an Interactor for an interaction method.
-//
-// DoWithContext may add headers to req.Header.
-func (c *Client) DoWithContext(ctx context.Context, req *http.Request) (*http.Response, error) {
- return c.do(ctx, req, nil)
-}
-
-// DoWithCustomError is like Do except it allows a client
-// to specify a custom error function, getError, which is called on the
-// HTTP response and may return a non-nil error if the response holds an
-// error. If the cause of the returned error is a *Error value and its
-// code is ErrDischargeRequired, the macaroon in its Info field will be
-// discharged and the request will be repeated with the discharged
-// macaroon. If getError returns nil, it should leave the response body
-// unchanged.
-//
-// If getError is nil, DefaultGetError will be used.
-//
-// This method can be useful when dealing with APIs that
-// return their errors in a format incompatible with Error, but the
-// need for it should be avoided when creating new APIs,
-// as it makes the endpoints less amenable to generic tools.
-func (c *Client) DoWithCustomError(req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- return c.do(contextFromRequest(req), req, getError)
-}
-
-func (c *Client) do(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- c.logDebugf(ctx, "client do %s %s {", req.Method, req.URL)
- resp, err := c.do1(ctx, req, getError)
- c.logDebugf(ctx, "} -> error %#v", err)
- return resp, err
-}
-
-func (c *Client) do1(ctx context.Context, req *http.Request, getError func(resp *http.Response) error) (*http.Response, error) {
- if getError == nil {
- getError = DefaultGetError
- }
- if c.Client.Jar == nil {
- return nil, errgo.New("no cookie jar supplied in HTTP client")
- }
- rreq, ok := newRetryableRequest(c.Client, req)
- if !ok {
- return nil, fmt.Errorf("request body is not seekable")
- }
- defer rreq.close()
-
- req.Header.Set(BakeryProtocolHeader, fmt.Sprint(bakery.LatestVersion))
-
- // Make several attempts to do the request, because we might have
- // to get through several layers of security. We only retry if
- // we get a DischargeRequiredError and succeed in discharging
- // the macaroon in it.
- retry := 0
- for {
- resp, err := c.do2(ctx, rreq, getError)
- if err == nil || !isDischargeRequiredError(err) {
- return resp, errgo.Mask(err, errgo.Any)
- }
- if retry++; retry > maxDischargeRetries {
- return nil, errgo.NoteMask(err, fmt.Sprintf("too many (%d) discharge requests", retry-1), errgo.Any)
- }
- if err1 := c.HandleError(ctx, req.URL, err); err1 != nil {
- return nil, errgo.Mask(err1, errgo.Any)
- }
- c.logDebugf(ctx, "discharge succeeded; retry %d", retry)
- }
-}
-
-func (c *Client) do2(ctx context.Context, rreq *retryableRequest, getError func(resp *http.Response) error) (*http.Response, error) {
- httpResp, err := rreq.do(ctx)
- if err != nil {
- return nil, errgo.Mask(err, errgo.Any)
- }
- err = getError(httpResp)
- if err == nil {
- c.logInfof(ctx, "HTTP response OK (status %v)", httpResp.Status)
- return httpResp, nil
- }
- httpResp.Body.Close()
- return nil, errgo.Mask(err, errgo.Any)
-}
-
-// HandleError tries to resolve the given error, which should be a
-// response to the given URL, by discharging any macaroon contained in
-// it. That is, if the error cause is an *Error and its code is
-// ErrDischargeRequired, then it will try to discharge
-// err.Info.Macaroon. If the discharge succeeds, the discharged macaroon
-// will be saved to the client's cookie jar and ResolveError will return
-// nil.
-//
-// For any other kind of error, the original error will be returned.
-func (c *Client) HandleError(ctx context.Context, reqURL *url.URL, err error) error {
- respErr, ok := errgo.Cause(err).(*Error)
- if !ok {
- return err
- }
- if respErr.Code != ErrDischargeRequired {
- return respErr
- }
- if respErr.Info == nil || respErr.Info.Macaroon == nil {
- return errgo.New("no macaroon found in discharge-required response")
- }
- mac := respErr.Info.Macaroon
- macaroons, err := bakery.DischargeAllWithKey(ctx, mac, c.AcquireDischarge, c.Key)
- if err != nil {
- return errgo.Mask(err, errgo.Any)
- }
- var cookiePath string
- if path := respErr.Info.MacaroonPath; path != "" {
- relURL, err := parseURLPath(path)
- if err != nil {
- c.logInfof(ctx, "ignoring invalid path in discharge-required response: %v", err)
- } else {
- cookiePath = reqURL.ResolveReference(relURL).Path
- }
- }
- // TODO use a namespace taken from the error response.
- cookie, err := NewCookie(nil, macaroons)
- if err != nil {
- return errgo.Notef(err, "cannot make cookie")
- }
- cookie.Path = cookiePath
- if name := respErr.Info.CookieNameSuffix; name != "" {
- cookie.Name = "macaroon-" + name
- }
- c.Jar.SetCookies(reqURL, []*http.Cookie{cookie})
- return nil
-}
-
-// DefaultGetError is the default error unmarshaler used by Client.Do.
-func DefaultGetError(httpResp *http.Response) error {
- if httpResp.StatusCode != http.StatusProxyAuthRequired && httpResp.StatusCode != http.StatusUnauthorized {
- return nil
- }
- // Check for the new protocol discharge error.
- if httpResp.StatusCode == http.StatusUnauthorized && httpResp.Header.Get("WWW-Authenticate") != "Macaroon" {
- return nil
- }
- if httpResp.Header.Get("Content-Type") != "application/json" {
- return nil
- }
- var resp Error
- if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil {
- return fmt.Errorf("cannot unmarshal error response: %v", err)
- }
- return &resp
-}
-
-func parseURLPath(path string) (*url.URL, error) {
- u, err := url.Parse(path)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if u.Scheme != "" ||
- u.Opaque != "" ||
- u.User != nil ||
- u.Host != "" ||
- u.RawQuery != "" ||
- u.Fragment != "" {
- return nil, errgo.Newf("URL path %q is not clean", path)
- }
- return u, nil
-}
-
-// PermanentExpiryDuration holds the length of time a cookie
-// holding a macaroon with no time-before caveat will be
-// stored.
-const PermanentExpiryDuration = 100 * 365 * 24 * time.Hour
-
-// NewCookie takes a slice of macaroons and returns them
-// encoded as a cookie. The slice should contain a single primary
-// macaroon in its first element, and any discharges after that.
-//
-// The given namespace specifies the first party caveat namespace,
-// used for deriving the expiry time of the cookie.
-func NewCookie(ns *checkers.Namespace, ms macaroon.Slice) (*http.Cookie, error) {
- if len(ms) == 0 {
- return nil, errgo.New("no macaroons in cookie")
- }
- // TODO(rog) marshal cookie as binary if version allows.
- data, err := json.Marshal(ms)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal macaroons")
- }
- cookie := &http.Cookie{
- Name: fmt.Sprintf("macaroon-%x", ms[0].Signature()),
- Value: base64.StdEncoding.EncodeToString(data),
- }
- expires, found := checkers.MacaroonsExpiryTime(ns, ms)
- if !found {
- // The macaroon doesn't expire - use a very long expiry
- // time for the cookie.
- expires = time.Now().Add(PermanentExpiryDuration)
- } else if expires.Sub(time.Now()) < time.Minute {
- // The macaroon might have expired already, or it's
- // got a short duration, so treat it as a session cookie
- // by setting Expires to the zero time.
- expires = time.Time{}
- }
- cookie.Expires = expires
- // TODO(rog) other fields.
- return cookie, nil
-}
-
-// SetCookie sets a cookie for the given URL on the given cookie jar
-// that will holds the given macaroon slice. The macaroon slice should
-// contain a single primary macaroon in its first element, and any
-// discharges after that.
-//
-// The given namespace specifies the first party caveat namespace,
-// used for deriving the expiry time of the cookie.
-func SetCookie(jar http.CookieJar, url *url.URL, ns *checkers.Namespace, ms macaroon.Slice) error {
- cookie, err := NewCookie(ns, ms)
- if err != nil {
- return errgo.Mask(err)
- }
- jar.SetCookies(url, []*http.Cookie{cookie})
- return nil
-}
-
-// MacaroonsForURL returns any macaroons associated with the
-// given URL in the given cookie jar.
-func MacaroonsForURL(jar http.CookieJar, u *url.URL) []macaroon.Slice {
- return cookiesToMacaroons(jar.Cookies(u))
-}
-
-func appendURLElem(u, elem string) string {
- if strings.HasSuffix(u, "/") {
- return u + elem
- }
- return u + "/" + elem
-}
-
-// AcquireDischarge acquires a discharge macaroon from the caveat location as an HTTP URL.
-// It fits the getDischarge argument type required by bakery.DischargeAll.
-func (c *Client) AcquireDischarge(ctx context.Context, cav macaroon.Caveat, payload []byte) (*bakery.Macaroon, error) {
- m, err := c.acquireDischarge(ctx, cav, payload, nil)
- if err == nil {
- return m, nil
- }
- cause, ok := errgo.Cause(err).(*Error)
- if !ok {
- return nil, errgo.NoteMask(err, "cannot acquire discharge", IsInteractionError)
- }
- if cause.Code != ErrInteractionRequired {
- return nil, &DischargeError{
- Reason: cause,
- }
- }
- if cause.Info == nil {
- return nil, errgo.Notef(err, "interaction-required response with no info")
- }
- // Make sure the location has a trailing slash so that
- // the relative URL calculations work correctly even when
- // cav.Location doesn't have a trailing slash.
- loc := appendURLElem(cav.Location, "")
- token, m, err := c.interact(ctx, loc, cause, payload)
- if err != nil {
- return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- if m != nil {
- // We've acquired the macaroon directly via legacy interaction.
- return m, nil
- }
-
- // Try to acquire the discharge again, but this time with
- // the token acquired by the interaction method.
- m, err = c.acquireDischarge(ctx, cav, payload, token)
- if err != nil {
- return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- return m, nil
-}
-
-// acquireDischarge is like AcquireDischarge except that it also
-// takes a token acquired from an interaction method.
-func (c *Client) acquireDischarge(
- ctx context.Context,
- cav macaroon.Caveat,
- payload []byte,
- token *DischargeToken,
-) (*bakery.Macaroon, error) {
- dclient := newDischargeClient(cav.Location, c)
- var req dischargeRequest
- req.Id, req.Id64 = maybeBase64Encode(cav.Id)
- if token != nil {
- req.Token, req.Token64 = maybeBase64Encode(token.Value)
- req.TokenKind = token.Kind
- }
- req.Caveat = base64.RawURLEncoding.EncodeToString(payload)
- resp, err := dclient.Discharge(ctx, &req)
- if err == nil {
- return resp.Macaroon, nil
- }
- return nil, errgo.Mask(err, errgo.Any)
-}
-
-// interact gathers a macaroon by directing the user to interact with a
-// web page. The irErr argument holds the interaction-required
-// error response.
-func (c *Client) interact(ctx context.Context, location string, irErr *Error, payload []byte) (*DischargeToken, *bakery.Macaroon, error) {
- if len(c.InteractionMethods) == 0 {
- return nil, nil, &InteractionError{
- Reason: errgo.New("interaction required but not possible"),
- }
- }
- if irErr.Info.InteractionMethods == nil && irErr.Info.LegacyVisitURL != "" {
- // It's an old-style error; deal with it differently.
- m, err := c.legacyInteract(ctx, location, irErr)
- if err != nil {
- return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- return nil, m, nil
- }
- for _, interactor := range c.InteractionMethods {
- c.logDebugf(ctx, "checking interaction method %q", interactor.Kind())
- if _, ok := irErr.Info.InteractionMethods[interactor.Kind()]; ok {
- c.logDebugf(ctx, "found possible interaction method %q", interactor.Kind())
- token, err := interactor.Interact(ctx, c, location, irErr)
- if err != nil {
- if errgo.Cause(err) == ErrInteractionMethodNotFound {
- continue
- }
- return nil, nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- if token == nil {
- return nil, nil, errgo.New("interaction method returned an empty token")
- }
- return token, nil, nil
- } else {
- c.logDebugf(ctx, "interaction method %q not found in %#v", interactor.Kind(), irErr.Info.InteractionMethods)
- }
- }
- return nil, nil, &InteractionError{
- Reason: errgo.Newf("no supported interaction method"),
- }
-}
-
-func (c *Client) legacyInteract(ctx context.Context, location string, irErr *Error) (*bakery.Macaroon, error) {
- visitURL, err := relativeURL(location, irErr.Info.LegacyVisitURL)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- waitURL, err := relativeURL(location, irErr.Info.LegacyWaitURL)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- methodURLs := map[string]*url.URL{
- "interactive": visitURL,
- }
- if len(c.InteractionMethods) > 1 || c.InteractionMethods[0].Kind() != WebBrowserInteractionKind {
- // We have several possible methods or we only support a non-window
- // method, so we need to fetch the possible methods supported by the discharger.
- methodURLs = legacyGetInteractionMethods(ctx, c.logger(), c, visitURL)
- }
- for _, interactor := range c.InteractionMethods {
- kind := interactor.Kind()
- if kind == WebBrowserInteractionKind {
- // This is the old name for browser-window interaction.
- kind = "interactive"
- }
- interactor, ok := interactor.(LegacyInteractor)
- if !ok {
- // Legacy interaction mode isn't supported.
- continue
- }
- visitURL, ok := methodURLs[kind]
- if !ok {
- continue
- }
- visitURL, err := relativeURL(location, visitURL.String())
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if err := interactor.LegacyInteract(ctx, c, location, visitURL); err != nil {
- return nil, &InteractionError{
- Reason: errgo.Mask(err, errgo.Any),
- }
- }
- return waitForMacaroon(ctx, c, waitURL)
- }
- return nil, &InteractionError{
- Reason: errgo.Newf("no methods supported"),
- }
-}
-
-func (c *Client) logDebugf(ctx context.Context, f string, a ...interface{}) {
- c.logger().Debugf(ctx, f, a...)
-}
-
-func (c *Client) logInfof(ctx context.Context, f string, a ...interface{}) {
- c.logger().Infof(ctx, f, a...)
-}
-
-func (c *Client) logger() bakery.Logger {
- if c.Logger != nil {
- return c.Logger
- }
- return bakery.DefaultLogger("httpbakery")
-}
-
-// waitForMacaroon returns a macaroon from a legacy wait endpoint.
-func waitForMacaroon(ctx context.Context, client *Client, waitURL *url.URL) (*bakery.Macaroon, error) {
- req, err := http.NewRequest("GET", waitURL.String(), nil)
- if err != nil {
- return nil, errgo.Mask(err)
- }
- req = req.WithContext(ctx)
- httpResp, err := client.Client.Do(req)
- if err != nil {
- return nil, errgo.Notef(err, "cannot get %q", waitURL)
- }
- defer httpResp.Body.Close()
- if httpResp.StatusCode != http.StatusOK {
- err := unmarshalError(httpResp)
- if err1, ok := err.(*Error); ok {
- err = &DischargeError{
- Reason: err1,
- }
- }
- return nil, errgo.NoteMask(err, "failed to acquire macaroon after waiting", errgo.Any)
- }
- var resp WaitResponse
- if err := httprequest.UnmarshalJSONResponse(httpResp, &resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait response")
- }
- return resp.Macaroon, nil
-}
-
-// relativeURL returns newPath relative to an original URL.
-func relativeURL(base, new string) (*url.URL, error) {
- if new == "" {
- return nil, errgo.Newf("empty URL")
- }
- baseURL, err := url.Parse(base)
- if err != nil {
- return nil, errgo.Notef(err, "cannot parse URL")
- }
- newURL, err := url.Parse(new)
- if err != nil {
- return nil, errgo.Notef(err, "cannot parse URL")
- }
- return baseURL.ResolveReference(newURL), nil
-}
-
-// TODO(rog) move a lot of the code below into server.go, as it's
-// much more about server side than client side.
-
-// MacaroonsHeader is the key of the HTTP header that can be used to provide a
-// macaroon for request authorization.
-const MacaroonsHeader = "Macaroons"
-
-// RequestMacaroons returns any collections of macaroons from the header and
-// cookies found in the request. By convention, each slice will contain a
-// primary macaroon followed by its discharges.
-func RequestMacaroons(req *http.Request) []macaroon.Slice {
- mss := cookiesToMacaroons(req.Cookies())
- for _, h := range req.Header[MacaroonsHeader] {
- ms, err := decodeMacaroonSlice(h)
- if err != nil {
- // Ignore invalid macaroons.
- continue
- }
- mss = append(mss, ms)
- }
- return mss
-}
-
-// cookiesToMacaroons returns a slice of any macaroons found
-// in the given slice of cookies.
-func cookiesToMacaroons(cookies []*http.Cookie) []macaroon.Slice {
- var mss []macaroon.Slice
- for _, cookie := range cookies {
- if !strings.HasPrefix(cookie.Name, "macaroon-") {
- continue
- }
- ms, err := decodeMacaroonSlice(cookie.Value)
- if err != nil {
- // Ignore invalid macaroons.
- continue
- }
- mss = append(mss, ms)
- }
- return mss
-}
-
-// decodeMacaroonSlice decodes a base64-JSON-encoded slice of macaroons from
-// the given string.
-func decodeMacaroonSlice(value string) (macaroon.Slice, error) {
- data, err := macaroon.Base64Decode([]byte(value))
- if err != nil {
- return nil, errgo.NoteMask(err, "cannot base64-decode macaroons")
- }
- // TODO(rog) accept binary encoded macaroon cookies.
- var ms macaroon.Slice
- if err := json.Unmarshal(data, &ms); err != nil {
- return nil, errgo.NoteMask(err, "cannot unmarshal macaroons")
- }
- return ms, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go
deleted file mode 100644
index 6ae98530..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_go17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build go1.7
-
-package httpbakery
-
-import (
- "context"
- "net/http"
-)
-
-func contextFromRequest(req *http.Request) context.Context {
- return req.Context()
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go
deleted file mode 100644
index aecca0d3..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/context_prego17.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !go1.7
-
-package httpbakery
-
-import (
- "context"
- "net/http"
-)
-
-func contextFromRequest(req *http.Request) context.Context {
- return context.Background()
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go
deleted file mode 100644
index fa88bfa1..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/discharge.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/base64"
- "net/http"
- "path"
- "unicode/utf8"
-
- "github.com/julienschmidt/httprouter"
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
- "gopkg.in/macaroon.v2"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// ThirdPartyCaveatChecker is used to check third party caveats.
-// This interface is deprecated and included only for backward
-// compatibility; ThirdPartyCaveatCheckerP should be used instead.
-type ThirdPartyCaveatChecker interface {
- // CheckThirdPartyCaveat is like ThirdPartyCaveatCheckerP.CheckThirdPartyCaveat
- // except that it uses separate arguments instead of a struct arg.
- CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerP is used to check third party caveats.
-// The "P" stands for "Params" - this was added after ThirdPartyCaveatChecker
-// which can't be removed without breaking backwards compatibility.
-type ThirdPartyCaveatCheckerP interface {
- // CheckThirdPartyCaveat is used to check whether a client
- // making the given request should be allowed a discharge for
- // the p.Info.Condition. On success, the caveat will be discharged,
- // with any returned caveats also added to the discharge
- // macaroon.
- //
- // The p.Token field, if non-nil, is a token obtained from
- // Interactor.Interact as the result of a discharge interaction
- // after an interaction required error.
- //
- // Note than when used in the context of a discharge handler
- // created by Discharger, any returned errors will be marshaled
- // as documented in DischargeHandler.ErrorMapper.
- CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error)
-}
-
-// ThirdPartyCaveatCheckerParams holds the parameters passed to
-// CheckThirdPartyCaveatP.
-type ThirdPartyCaveatCheckerParams struct {
- // Caveat holds information about the caveat being discharged.
- Caveat *bakery.ThirdPartyCaveatInfo
-
- // Token holds the discharge token provided by the client, if any.
- Token *DischargeToken
-
- // Req holds the HTTP discharge request.
- Request *http.Request
-
- // Response holds the HTTP response writer. Implementations
- // must not call its WriteHeader or Write methods.
- Response http.ResponseWriter
-}
-
-// ThirdPartyCaveatCheckerFunc implements ThirdPartyCaveatChecker
-// by calling a function.
-type ThirdPartyCaveatCheckerFunc func(ctx context.Context, req *http.Request, info *bakery.ThirdPartyCaveatInfo, token *DischargeToken) ([]checkers.Caveat, error)
-
-func (f ThirdPartyCaveatCheckerFunc) CheckThirdPartyCaveat(ctx context.Context, info *bakery.ThirdPartyCaveatInfo, req *http.Request, token *DischargeToken) ([]checkers.Caveat, error) {
- return f(ctx, req, info, token)
-}
-
-// ThirdPartyCaveatCheckerPFunc implements ThirdPartyCaveatCheckerP
-// by calling a function.
-type ThirdPartyCaveatCheckerPFunc func(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error)
-
-func (f ThirdPartyCaveatCheckerPFunc) CheckThirdPartyCaveat(ctx context.Context, p ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) {
- return f(ctx, p)
-}
-
-// newDischargeClient returns a discharge client that addresses the
-// third party discharger at the given location URL and uses
-// the given client to make HTTP requests.
-//
-// If client is nil, http.DefaultClient is used.
-func newDischargeClient(location string, client httprequest.Doer) *dischargeClient {
- if client == nil {
- client = http.DefaultClient
- }
- return &dischargeClient{
- Client: httprequest.Client{
- BaseURL: location,
- Doer: client,
- UnmarshalError: unmarshalError,
- },
- }
-}
-
-// Discharger holds parameters for creating a new Discharger.
-type DischargerParams struct {
- // CheckerP is used to actually check the caveats.
- // This will be used in preference to Checker.
- CheckerP ThirdPartyCaveatCheckerP
-
- // Checker is used to actually check the caveats.
- // This should be considered deprecated and will be ignored if CheckerP is set.
- Checker ThirdPartyCaveatChecker
-
- // Key holds the key pair of the discharger.
- Key *bakery.KeyPair
-
- // Locator is used to find public keys when adding
- // third-party caveats on discharge macaroons.
- // If this is nil, no third party caveats may be added.
- Locator bakery.ThirdPartyLocator
-
- // ErrorToResponse is used to convert errors returned by the third
- // party caveat checker to the form that will be JSON-marshaled
- // on the wire. If zero, this defaults to ErrorToResponse.
- // If set, it should handle errors that it does not understand
- // by falling back to calling ErrorToResponse to ensure
- // that the standard bakery errors are marshaled in the expected way.
- ErrorToResponse func(ctx context.Context, err error) (int, interface{})
-}
-
-// Discharger represents a third-party caveat discharger.
-// can discharge caveats in an HTTP server.
-//
-// The name space served by dischargers is as follows.
-// All parameters can be provided either as URL attributes
-// or form attributes. The result is always formatted as a JSON
-// object.
-//
-// On failure, all endpoints return an error described by
-// the Error type.
-//
-// POST /discharge
-// params:
-// id: all-UTF-8 third party caveat id
-// id64: non-padded URL-base64 encoded caveat id
-// macaroon-id: (optional) id to give to discharge macaroon (defaults to id)
-// token: (optional) value of discharge token
-// token64: (optional) base64-encoded value of discharge token.
-// token-kind: (mandatory if token or token64 provided) discharge token kind.
-// result on success (http.StatusOK):
-// {
-// Macaroon *macaroon.Macaroon
-// }
-//
-// GET /publickey
-// result:
-// public key of service
-// expiry time of key
-type Discharger struct {
- p DischargerParams
-}
-
-// NewDischarger returns a new third-party caveat discharger
-// using the given parameters.
-func NewDischarger(p DischargerParams) *Discharger {
- if p.ErrorToResponse == nil {
- p.ErrorToResponse = ErrorToResponse
- }
- if p.Locator == nil {
- p.Locator = emptyLocator{}
- }
- if p.CheckerP == nil {
- p.CheckerP = ThirdPartyCaveatCheckerPFunc(func(ctx context.Context, cp ThirdPartyCaveatCheckerParams) ([]checkers.Caveat, error) {
- return p.Checker.CheckThirdPartyCaveat(ctx, cp.Caveat, cp.Request, cp.Token)
- })
- }
- return &Discharger{
- p: p,
- }
-}
-
-type emptyLocator struct{}
-
-func (emptyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) {
- return bakery.ThirdPartyInfo{}, bakery.ErrNotFound
-}
-
-// AddMuxHandlers adds handlers to the given ServeMux to provide
-// a third-party caveat discharge service.
-func (d *Discharger) AddMuxHandlers(mux *http.ServeMux, rootPath string) {
- for _, h := range d.Handlers() {
- // Note: this only works because we don't have any wildcard
- // patterns in the discharger paths.
- mux.Handle(path.Join(rootPath, h.Path), mkHTTPHandler(h.Handle))
- }
-}
-
-// Handlers returns a slice of handlers that can handle a third-party
-// caveat discharge service when added to an httprouter.Router.
-// TODO provide some way of customizing the context so that
-// ErrorToResponse can see a request-specific context.
-func (d *Discharger) Handlers() []httprequest.Handler {
- f := func(p httprequest.Params) (dischargeHandler, context.Context, error) {
- return dischargeHandler{
- discharger: d,
- }, p.Context, nil
- }
- srv := httprequest.Server{
- ErrorMapper: d.p.ErrorToResponse,
- }
- return srv.Handlers(f)
-}
-
-//go:generate httprequest-generate-client github.com/go-macaroon-bakery/macaroon-bakery/v3-unstable/httpbakery dischargeHandler dischargeClient
-
-// dischargeHandler is the type used to define the httprequest handler
-// methods for a discharger.
-type dischargeHandler struct {
- discharger *Discharger
-}
-
-// dischargeRequest is a request to create a macaroon that discharges the
-// supplied third-party caveat. Discharging caveats will normally be
-// handled by the bakery it would be unusual to use this type directly in
-// client software.
-type dischargeRequest struct {
- httprequest.Route `httprequest:"POST /discharge"`
- Id string `httprequest:"id,form,omitempty"`
- Id64 string `httprequest:"id64,form,omitempty"`
- Caveat string `httprequest:"caveat64,form,omitempty"`
- Token string `httprequest:"token,form,omitempty"`
- Token64 string `httprequest:"token64,form,omitempty"`
- TokenKind string `httprequest:"token-kind,form,omitempty"`
-}
-
-// dischargeResponse contains the response from a /discharge POST request.
-type dischargeResponse struct {
- Macaroon *bakery.Macaroon `json:",omitempty"`
-}
-
-// Discharge discharges a third party caveat.
-func (h dischargeHandler) Discharge(p httprequest.Params, r *dischargeRequest) (*dischargeResponse, error) {
- id, err := maybeBase64Decode(r.Id, r.Id64)
- if err != nil {
- return nil, errgo.Notef(err, "bad caveat id")
- }
- var caveat []byte
- if r.Caveat != "" {
- // Note that it's important that when r.Caveat is empty,
- // we leave DischargeParams.Caveat as nil (Base64Decode
- // always returns a non-nil byte slice).
- caveat1, err := macaroon.Base64Decode([]byte(r.Caveat))
- if err != nil {
- return nil, errgo.Notef(err, "bad base64-encoded caveat: %v", err)
- }
- caveat = caveat1
- }
- tokenVal, err := maybeBase64Decode(r.Token, r.Token64)
- if err != nil {
- return nil, errgo.Notef(err, "bad discharge token")
- }
- var token *DischargeToken
- if len(tokenVal) != 0 {
- if r.TokenKind == "" {
- return nil, errgo.Notef(err, "discharge token provided without token kind")
- }
- token = &DischargeToken{
- Kind: r.TokenKind,
- Value: tokenVal,
- }
- }
- m, err := bakery.Discharge(p.Context, bakery.DischargeParams{
- Id: id,
- Caveat: caveat,
- Key: h.discharger.p.Key,
- Checker: bakery.ThirdPartyCaveatCheckerFunc(
- func(ctx context.Context, cav *bakery.ThirdPartyCaveatInfo) ([]checkers.Caveat, error) {
- return h.discharger.p.CheckerP.CheckThirdPartyCaveat(ctx, ThirdPartyCaveatCheckerParams{
- Caveat: cav,
- Request: p.Request,
- Response: p.Response,
- Token: token,
- })
- },
- ),
- Locator: h.discharger.p.Locator,
- })
- if err != nil {
- return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any)
- }
- return &dischargeResponse{m}, nil
-}
-
-// publicKeyRequest specifies the /publickey endpoint.
-type publicKeyRequest struct {
- httprequest.Route `httprequest:"GET /publickey"`
-}
-
-// publicKeyResponse is the response to a /publickey GET request.
-type publicKeyResponse struct {
- PublicKey *bakery.PublicKey
-}
-
-// dischargeInfoRequest specifies the /discharge/info endpoint.
-type dischargeInfoRequest struct {
- httprequest.Route `httprequest:"GET /discharge/info"`
-}
-
-// dischargeInfoResponse is the response to a /discharge/info GET
-// request.
-type dischargeInfoResponse struct {
- PublicKey *bakery.PublicKey
- Version bakery.Version
-}
-
-// PublicKey returns the public key of the discharge service.
-func (h dischargeHandler) PublicKey(*publicKeyRequest) (publicKeyResponse, error) {
- return publicKeyResponse{
- PublicKey: &h.discharger.p.Key.Public,
- }, nil
-}
-
-// DischargeInfo returns information on the discharger.
-func (h dischargeHandler) DischargeInfo(*dischargeInfoRequest) (dischargeInfoResponse, error) {
- return dischargeInfoResponse{
- PublicKey: &h.discharger.p.Key.Public,
- Version: bakery.LatestVersion,
- }, nil
-}
-
-// mkHTTPHandler converts an httprouter handler to an http.Handler,
-// assuming that the httprouter handler has no wildcard path
-// parameters.
-func mkHTTPHandler(h httprouter.Handle) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- h(w, req, nil)
- })
-}
-
-// maybeBase64Encode encodes b as is if it's
-// OK to be passed as a URL form parameter,
-// or encoded as base64 otherwise.
-func maybeBase64Encode(b []byte) (s, s64 string) {
- if utf8.Valid(b) {
- valid := true
- for _, c := range b {
- if c < 32 || c == 127 {
- valid = false
- break
- }
- }
- if valid {
- return string(b), ""
- }
- }
- return "", base64.RawURLEncoding.EncodeToString(b)
-}
-
-// maybeBase64Decode implements the inverse of maybeBase64Encode.
-func maybeBase64Decode(s, s64 string) ([]byte, error) {
- if s64 != "" {
- data, err := macaroon.Base64Decode([]byte(s64))
- if err != nil {
- return nil, errgo.Mask(err)
- }
- if len(data) == 0 {
- return nil, nil
- }
- return data, nil
- }
- return []byte(s), nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go
deleted file mode 100644
index 3a738f38..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/dischargeclient_generated.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// The code in this file was automatically generated by running httprequest-generate-client.
-// DO NOT EDIT
-
-package httpbakery
-
-import (
- "context"
-
- "gopkg.in/httprequest.v1"
-)
-
-type dischargeClient struct {
- Client httprequest.Client
-}
-
-// Discharge discharges a third party caveat.
-func (c *dischargeClient) Discharge(ctx context.Context, p *dischargeRequest) (*dischargeResponse, error) {
- var r *dischargeResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
-
-// DischargeInfo returns information on the discharger.
-func (c *dischargeClient) DischargeInfo(ctx context.Context, p *dischargeInfoRequest) (dischargeInfoResponse, error) {
- var r dischargeInfoResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
-
-// PublicKey returns the public key of the discharge service.
-func (c *dischargeClient) PublicKey(ctx context.Context, p *publicKeyRequest) (publicKeyResponse, error) {
- var r publicKeyResponse
- err := c.Client.Call(ctx, p, &r)
- return r, err
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go
deleted file mode 100644
index 0ccc0794..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/error.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "strconv"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil"
-)
-
-// ErrorCode holds an error code that classifies
-// an error returned from a bakery HTTP handler.
-type ErrorCode string
-
-func (e ErrorCode) Error() string {
- return string(e)
-}
-
-func (e ErrorCode) ErrorCode() ErrorCode {
- return e
-}
-
-const (
- ErrBadRequest = ErrorCode("bad request")
- ErrDischargeRequired = ErrorCode("macaroon discharge required")
- ErrInteractionRequired = ErrorCode("interaction required")
- ErrInteractionMethodNotFound = ErrorCode("discharger does not provide an supported interaction method")
- ErrPermissionDenied = ErrorCode("permission denied")
-)
-
-var httpReqServer = httprequest.Server{
- ErrorMapper: ErrorToResponse,
-}
-
-// WriteError writes the given bakery error to w.
-func WriteError(ctx context.Context, w http.ResponseWriter, err error) {
- httpReqServer.WriteError(ctx, w, err)
-}
-
-// Error holds the type of a response from an httpbakery HTTP request,
-// marshaled as JSON.
-//
-// Note: Do not construct Error values with ErrDischargeRequired or
-// ErrInteractionRequired codes directly - use the
-// NewDischargeRequiredError or NewInteractionRequiredError
-// functions instead.
-type Error struct {
- Code ErrorCode `json:",omitempty"`
- Message string `json:",omitempty"`
- Info *ErrorInfo `json:",omitempty"`
-
- // version holds the protocol version that was used
- // to create the error (see NewDischargeRequiredError).
- version bakery.Version
-}
-
-// ErrorInfo holds additional information provided
-// by an error.
-type ErrorInfo struct {
- // Macaroon may hold a macaroon that, when
- // discharged, may allow access to a service.
- // This field is associated with the ErrDischargeRequired
- // error code.
- Macaroon *bakery.Macaroon `json:",omitempty"`
-
- // MacaroonPath holds the URL path to be associated
- // with the macaroon. The macaroon is potentially
- // valid for all URLs under the given path.
- // If it is empty, the macaroon will be associated with
- // the original URL from which the error was returned.
- MacaroonPath string `json:",omitempty"`
-
- // CookieNameSuffix holds the desired cookie name suffix to be
- // associated with the macaroon. The actual name used will be
- // ("macaroon-" + CookieName). Clients may ignore this field -
- // older clients will always use ("macaroon-" +
- // macaroon.Signature() in hex).
- CookieNameSuffix string `json:",omitempty"`
-
- // The following fields are associated with the
- // ErrInteractionRequired error code.
-
- // InteractionMethods holds the set of methods that the
- // third party supports for completing the discharge.
- // See InteractionMethod for a more convenient
- // accessor method.
- InteractionMethods map[string]*json.RawMessage `json:",omitempty"`
-
- // LegacyVisitURL holds a URL that the client should visit
- // in a web browser to authenticate themselves.
- // This is deprecated - it is superceded by the InteractionMethods
- // field.
- LegacyVisitURL string `json:"VisitURL,omitempty"`
-
- // LegacyWaitURL holds a URL that the client should visit
- // to acquire the discharge macaroon. A GET on
- // this URL will block until the client has authenticated,
- // and then it will return the discharge macaroon.
- // This is deprecated - it is superceded by the InteractionMethods
- // field.
- LegacyWaitURL string `json:"WaitURL,omitempty"`
-}
-
-// SetInteraction sets the information for a particular
-// interaction kind to v. The error should be an interaction-required
-// error. This method will panic if v cannot be JSON-marshaled.
-// It is expected that interaction implementations will
-// implement type-safe wrappers for this method,
-// so you should not need to call it directly.
-func (e *Error) SetInteraction(kind string, v interface{}) {
- if e.Info == nil {
- e.Info = new(ErrorInfo)
- }
- if e.Info.InteractionMethods == nil {
- e.Info.InteractionMethods = make(map[string]*json.RawMessage)
- }
- data, err := json.Marshal(v)
- if err != nil {
- panic(err)
- }
- m := json.RawMessage(data)
- e.Info.InteractionMethods[kind] = &m
-}
-
-// InteractionMethod checks whether the error is an InteractionRequired error
-// that implements the method with the given name, and JSON-unmarshals the
-// method-specific data into x.
-func (e *Error) InteractionMethod(kind string, x interface{}) error {
- if e.Info == nil || e.Code != ErrInteractionRequired {
- return errgo.Newf("not an interaction-required error (code %v)", e.Code)
- }
- entry := e.Info.InteractionMethods[kind]
- if entry == nil {
- return errgo.WithCausef(nil, ErrInteractionMethodNotFound, "interaction method %q not found", kind)
- }
- if err := json.Unmarshal(*entry, x); err != nil {
- return errgo.Notef(err, "cannot unmarshal data for interaction method %q", kind)
- }
- return nil
-}
-
-func (e *Error) Error() string {
- return e.Message
-}
-
-func (e *Error) ErrorCode() ErrorCode {
- return e.Code
-}
-
-// ErrorInfo returns additional information
-// about the error.
-// TODO return interface{} here?
-func (e *Error) ErrorInfo() *ErrorInfo {
- return e.Info
-}
-
-// ErrorToResponse returns the HTTP status and an error body to be
-// marshaled as JSON for the given error. This allows a third party
-// package to integrate bakery errors into their error responses when
-// they encounter an error with a *bakery.Error cause.
-func ErrorToResponse(ctx context.Context, err error) (int, interface{}) {
- errorBody := errorResponseBody(err)
- var body interface{} = errorBody
- status := http.StatusInternalServerError
- switch errorBody.Code {
- case ErrBadRequest:
- status = http.StatusBadRequest
- case ErrPermissionDenied:
- status = http.StatusUnauthorized
- case ErrDischargeRequired, ErrInteractionRequired:
- switch errorBody.version {
- case bakery.Version0:
- status = http.StatusProxyAuthRequired
- case bakery.Version1, bakery.Version2, bakery.Version3:
- status = http.StatusUnauthorized
- body = httprequest.CustomHeader{
- Body: body,
- SetHeaderFunc: setAuthenticateHeader,
- }
- default:
- panic(fmt.Sprintf("out of range version number %v", errorBody.version))
- }
- }
- return status, body
-}
-
-func setAuthenticateHeader(h http.Header) {
- h.Set("WWW-Authenticate", "Macaroon")
-}
-
-type errorInfoer interface {
- ErrorInfo() *ErrorInfo
-}
-
-type errorCoder interface {
- ErrorCode() ErrorCode
-}
-
-// errorResponse returns an appropriate error
-// response for the provided error.
-func errorResponseBody(err error) *Error {
- var errResp Error
- cause := errgo.Cause(err)
- if cause, ok := cause.(*Error); ok {
- // It's an Error already. Preserve the wrapped
- // error message but copy everything else.
- errResp = *cause
- errResp.Message = err.Error()
- return &errResp
- }
-
- // It's not an error. Preserve as much info as
- // we can find.
- errResp.Message = err.Error()
- if coder, ok := cause.(errorCoder); ok {
- errResp.Code = coder.ErrorCode()
- }
- if infoer, ok := cause.(errorInfoer); ok {
- errResp.Info = infoer.ErrorInfo()
- }
- return &errResp
-}
-
-// NewInteractionRequiredError returns an error of type *Error
-// that requests an interaction from the client in response
-// to the given request. The originalErr value describes the original
-// error - if it is nil, a default message will be provided.
-//
-// This function should be used in preference to creating the Error value
-// directly, as it sets the bakery protocol version correctly in the error.
-//
-// The returned error does not support any interaction kinds.
-// Use kind-specific SetInteraction methods (for example
-// WebBrowserInteractor.SetInteraction) to add supported
-// interaction kinds.
-//
-// Note that WebBrowserInteractor.SetInteraction should always be called
-// for legacy clients to maintain backwards compatibility.
-func NewInteractionRequiredError(originalErr error, req *http.Request) *Error {
- if originalErr == nil {
- originalErr = ErrInteractionRequired
- }
- return &Error{
- Message: originalErr.Error(),
- version: RequestVersion(req),
- Code: ErrInteractionRequired,
- }
-}
-
-type DischargeRequiredErrorParams struct {
- // Macaroon holds the macaroon that needs to be discharged
- // by the client.
- Macaroon *bakery.Macaroon
-
- // OriginalError holds the reason that the discharge-required
- // error was created. If it's nil, ErrDischargeRequired will
- // be used.
- OriginalError error
-
- // CookiePath holds the path for the client to give the cookie
- // holding the discharged macaroon. If it's empty, then a
- // relative path from the request URL path to / will be used if
- // Request is provided, or "/" otherwise.
- CookiePath string
-
- // CookieNameSuffix holds the suffix for the client
- // to give the cookie holding the discharged macaroon
- // (after the "macaroon-" prefix).
- // If it's empty, "auth" will be used.
- CookieNameSuffix string
-
- // Request holds the request that the error is in response to.
- // It is used to form the cookie path if CookiePath is empty.
- Request *http.Request
-}
-
-// NewDischargeRequiredErrorWithVersion returns an error of type *Error
-// that contains a macaroon to the client and acts as a request that the
-// macaroon be discharged to authorize the request.
-//
-// The client is responsible for discharging the macaroon and
-// storing it as a cookie (or including it as a Macaroon header)
-// to be used for the subsequent request.
-func NewDischargeRequiredError(p DischargeRequiredErrorParams) error {
- if p.OriginalError == nil {
- p.OriginalError = ErrDischargeRequired
- }
- if p.CookiePath == "" {
- p.CookiePath = "/"
- if p.Request != nil {
- path, err := httputil.RelativeURLPath(p.Request.URL.Path, "/")
- if err == nil {
- p.CookiePath = path
- }
- }
- }
- if p.CookieNameSuffix == "" {
- p.CookieNameSuffix = "auth"
- }
- return &Error{
- version: p.Macaroon.Version(),
- Message: p.OriginalError.Error(),
- Code: ErrDischargeRequired,
- Info: &ErrorInfo{
- Macaroon: p.Macaroon,
- MacaroonPath: p.CookiePath,
- CookieNameSuffix: p.CookieNameSuffix,
- },
- }
-}
-
-// BakeryProtocolHeader is the header that HTTP clients should set
-// to determine the bakery protocol version. If it is 0 or missing,
-// a discharge-required error response will be returned with HTTP status 407;
-// if it is 1, the response will have status 401 with the WWW-Authenticate
-// header set to "Macaroon".
-const BakeryProtocolHeader = "Bakery-Protocol-Version"
-
-// RequestVersion determines the bakery protocol version from a client
-// request. If the protocol cannot be determined, or is invalid, the
-// original version of the protocol is used. If a later version is
-// found, the latest known version is used, which is OK because versions
-// are backwardly compatible.
-//
-// TODO as there are no known version 0 clients, default to version 1
-// instead.
-func RequestVersion(req *http.Request) bakery.Version {
- vs := req.Header.Get(BakeryProtocolHeader)
- if vs == "" {
- // No header - use backward compatibility mode.
- return bakery.Version0
- }
- x, err := strconv.Atoi(vs)
- if err != nil || x < 0 {
- // Badly formed header - use backward compatibility mode.
- return bakery.Version0
- }
- v := bakery.Version(x)
- if v > bakery.LatestVersion {
- // Later version than we know about - use the
- // latest version that we can.
- return bakery.LatestVersion
- }
- return v
-}
-
-func isDischargeRequiredError(err error) bool {
- respErr, ok := errgo.Cause(err).(*Error)
- if !ok {
- return false
- }
- return respErr.Code == ErrDischargeRequired
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go
deleted file mode 100644
index b22610bb..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/keyring.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "net/url"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-var _ bakery.ThirdPartyLocator = (*ThirdPartyLocator)(nil)
-
-// NewThirdPartyLocator returns a new third party
-// locator that uses the given client to find
-// information about third parties and
-// uses the given cache as a backing.
-//
-// If cache is nil, a new cache will be created.
-//
-// If client is nil, http.DefaultClient will be used.
-func NewThirdPartyLocator(client httprequest.Doer, cache *bakery.ThirdPartyStore) *ThirdPartyLocator {
- if cache == nil {
- cache = bakery.NewThirdPartyStore()
- }
- if client == nil {
- client = http.DefaultClient
- }
- return &ThirdPartyLocator{
- client: client,
- cache: cache,
- }
-}
-
-// AllowInsecureThirdPartyLocator holds whether ThirdPartyLocator allows
-// insecure HTTP connections for fetching third party information.
-// It is provided for testing purposes and should not be used
-// in production code.
-var AllowInsecureThirdPartyLocator = false
-
-// ThirdPartyLocator represents locator that can interrogate
-// third party discharge services for information. By default it refuses
-// to use insecure URLs.
-type ThirdPartyLocator struct {
- client httprequest.Doer
- allowInsecure bool
- cache *bakery.ThirdPartyStore
-}
-
-// AllowInsecure allows insecure URLs. This can be useful
-// for testing purposes. See also AllowInsecureThirdPartyLocator.
-func (kr *ThirdPartyLocator) AllowInsecure() {
- kr.allowInsecure = true
-}
-
-// ThirdPartyLocator implements bakery.ThirdPartyLocator
-// by first looking in the backing cache and, if that fails,
-// making an HTTP request to find the information associated
-// with the given discharge location.
-//
-// It refuses to fetch information from non-HTTPS URLs.
-func (kr *ThirdPartyLocator) ThirdPartyInfo(ctx context.Context, loc string) (bakery.ThirdPartyInfo, error) {
- // If the cache has an entry in, we can use it regardless of URL scheme.
- // This allows entries for notionally insecure URLs to be added by other means (for
- // example via a config file).
- info, err := kr.cache.ThirdPartyInfo(ctx, loc)
- if err == nil {
- return info, nil
- }
- u, err := url.Parse(loc)
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Notef(err, "invalid discharge URL %q", loc)
- }
- if u.Scheme != "https" && !kr.allowInsecure && !AllowInsecureThirdPartyLocator {
- return bakery.ThirdPartyInfo{}, errgo.Newf("untrusted discharge URL %q", loc)
- }
- info, err = ThirdPartyInfoForLocation(ctx, kr.client, loc)
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- kr.cache.AddInfo(loc, info)
- return info, nil
-}
-
-// ThirdPartyInfoForLocation returns information on the third party
-// discharge server running at the given location URL. Note that this is
-// insecure if an http: URL scheme is used. If client is nil,
-// http.DefaultClient will be used.
-func ThirdPartyInfoForLocation(ctx context.Context, client httprequest.Doer, url string) (bakery.ThirdPartyInfo, error) {
- dclient := newDischargeClient(url, client)
- info, err := dclient.DischargeInfo(ctx, &dischargeInfoRequest{})
- if err == nil {
- return bakery.ThirdPartyInfo{
- PublicKey: *info.PublicKey,
- Version: info.Version,
- }, nil
- }
- derr, ok := errgo.Cause(err).(*httprequest.DecodeResponseError)
- if !ok || derr.Response.StatusCode != http.StatusNotFound {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- // The new endpoint isn't there, so try the old one.
- pkResp, err := dclient.PublicKey(ctx, &publicKeyRequest{})
- if err != nil {
- return bakery.ThirdPartyInfo{}, errgo.Mask(err)
- }
- return bakery.ThirdPartyInfo{
- PublicKey: *pkResp.PublicKey,
- Version: bakery.Version1,
- }, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go
deleted file mode 100644
index c301ad13..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/oven.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "time"
-
- "gopkg.in/errgo.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery/checkers"
-)
-
-// Oven is like bakery.Oven except it provides a method for
-// translating errors returned by bakery.AuthChecker into
-// errors suitable for passing to WriteError.
-type Oven struct {
- // Oven holds the bakery Oven used to create
- // new macaroons to put in discharge-required errors.
- *bakery.Oven
-
- // AuthnExpiry holds the expiry time of macaroons that
- // are created for authentication. As these are generally
- // applicable to all endpoints in an API, this is usually
- // longer than AuthzExpiry. If this is zero, DefaultAuthnExpiry
- // will be used.
- AuthnExpiry time.Duration
-
- // AuthzExpiry holds the expiry time of macaroons that are
- // created for authorization. As these are generally applicable
- // to specific operations, they generally don't need
- // a long lifespan, so this is usually shorter than AuthnExpiry.
- // If this is zero, DefaultAuthzExpiry will be used.
- AuthzExpiry time.Duration
-}
-
-// Default expiry times for macaroons created by Oven.Error.
-const (
- DefaultAuthnExpiry = 7 * 24 * time.Hour
- DefaultAuthzExpiry = 5 * time.Minute
-)
-
-// Error processes an error as returned from bakery.AuthChecker
-// into an error suitable for returning as a response to req
-// with WriteError.
-//
-// Specifically, it translates bakery.ErrPermissionDenied into
-// ErrPermissionDenied and bakery.DischargeRequiredError
-// into an Error with an ErrDischargeRequired code, using
-// oven.Oven to mint the macaroon in it.
-func (oven *Oven) Error(ctx context.Context, req *http.Request, err error) error {
- cause := errgo.Cause(err)
- if cause == bakery.ErrPermissionDenied {
- return errgo.WithCausef(err, ErrPermissionDenied, "")
- }
- derr, ok := cause.(*bakery.DischargeRequiredError)
- if !ok {
- return errgo.Mask(err)
- }
- // TODO it's possible to have more than two levels here - think
- // about some naming scheme for the cookies that allows that.
- expiryDuration := oven.AuthzExpiry
- if expiryDuration == 0 {
- expiryDuration = DefaultAuthzExpiry
- }
- cookieName := "authz"
- if derr.ForAuthentication {
- // Authentication macaroons are a bit different, so use
- // a different cookie name so both can be presented together.
- cookieName = "authn"
- expiryDuration = oven.AuthnExpiry
- if expiryDuration == 0 {
- expiryDuration = DefaultAuthnExpiry
- }
- }
- m, err := oven.Oven.NewMacaroon(ctx, RequestVersion(req), derr.Caveats, derr.Ops...)
- if err != nil {
- return errgo.Notef(err, "cannot mint new macaroon")
- }
- if err := m.AddCaveat(ctx, checkers.TimeBeforeCaveat(time.Now().Add(expiryDuration)), nil, nil); err != nil {
- return errgo.Notef(err, "cannot add time-before caveat")
- }
- return NewDischargeRequiredError(DischargeRequiredErrorParams{
- Macaroon: m,
- CookieNameSuffix: cookieName,
- Request: req,
- })
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go
deleted file mode 100644
index 2f936d7c..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/request.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package httpbakery
-
-import (
- "bytes"
- "context"
- "io"
- "net/http"
- "reflect"
- "sync"
- "sync/atomic"
-
- "gopkg.in/errgo.v1"
-)
-
-// newRetrableRequest wraps an HTTP request so that it can
-// be retried without incurring race conditions and reports
-// whether the request can be retried.
-// The client instance will be used to make the request
-// when the do method is called.
-//
-// Because http.NewRequest often wraps its request bodies
-// with ioutil.NopCloser, which hides whether the body is
-// seekable, we extract the seeker from inside the nopCloser if
-// possible.
-//
-// We also work around Go issue 12796 by preventing concurrent
-// reads to the underlying reader after the request body has
-// been closed by Client.Do.
-//
-// The returned value should be closed after use.
-func newRetryableRequest(client *http.Client, req *http.Request) (*retryableRequest, bool) {
- if req.Body == nil {
- return &retryableRequest{
- client: client,
- ref: 1,
- req: req,
- origCookie: req.Header.Get("Cookie"),
- }, true
- }
- body := seekerFromBody(req.Body)
- if body == nil {
- return nil, false
- }
- return &retryableRequest{
- client: client,
- ref: 1,
- req: req,
- body: body,
- origCookie: req.Header.Get("Cookie"),
- }, true
-}
-
-type retryableRequest struct {
- client *http.Client
- ref int32
- origCookie string
- body readSeekCloser
- readStopper *readStopper
- req *http.Request
-}
-
-// do performs the HTTP request.
-func (rreq *retryableRequest) do(ctx context.Context) (*http.Response, error) {
- req, err := rreq.prepare()
- if err != nil {
- return nil, errgo.Mask(err)
- }
- return rreq.client.Do(req.WithContext(ctx))
-}
-
-// prepare returns a new HTTP request object
-// by copying the original request and seeking
-// back to the start of the original body if needed.
-//
-// It needs to make a copy of the request because
-// the HTTP code can access the Request.Body field
-// after Client.Do has returned, which means we can't
-// replace it for the second request.
-func (rreq *retryableRequest) prepare() (*http.Request, error) {
- req := new(http.Request)
- *req = *rreq.req
- // Make sure that the original cookie header is still in place
- // so that we only end up with the cookies that are actually
- // added by the HTTP cookie logic, and not the ones that were
- // added in previous requests too.
- req.Header.Set("Cookie", rreq.origCookie)
- if rreq.body == nil {
- // No need for any of the seek shenanigans.
- return req, nil
- }
- if rreq.readStopper != nil {
- // We've made a previous request. Close its request
- // body so it can't interfere with the new request's body
- // and then seek back to the start.
- rreq.readStopper.Close()
- if _, err := rreq.body.Seek(0, 0); err != nil {
- return nil, errgo.Notef(err, "cannot seek to start of request body")
- }
- }
- atomic.AddInt32(&rreq.ref, 1)
- // Replace the request body with a new readStopper so that
- // we can stop a second request from interfering with current
- // request's body.
- rreq.readStopper = &readStopper{
- rreq: rreq,
- r: rreq.body,
- }
- req.Body = rreq.readStopper
- return req, nil
-}
-
-// close closes the request. It closes the underlying reader
-// when all references have gone.
-func (req *retryableRequest) close() error {
- if atomic.AddInt32(&req.ref, -1) == 0 && req.body != nil {
- // We've closed it for the last time, so actually close
- // the original body.
- return req.body.Close()
- }
- return nil
-}
-
-// readStopper works around an issue with the net/http
-// package (see http://golang.org/issue/12796).
-// Because the first HTTP request might not have finished
-// reading from its body when it returns, we need to
-// ensure that the second request does not race on Read,
-// so this type implements a Reader that prevents all Read
-// calls to the underlying Reader after Close has been called.
-type readStopper struct {
- rreq *retryableRequest
- mu sync.Mutex
- r io.ReadSeeker
-}
-
-func (r *readStopper) Read(buf []byte) (int, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.r == nil {
- // Note: we have to use io.EOF here because otherwise
- // another connection can in rare circumstances be
- // polluted by the error returned here. Although this
- // means the file may appear truncated to the server,
- // that shouldn't matter because the body will only
- // be closed after the server has replied.
- return 0, io.EOF
- }
- return r.r.Read(buf)
-}
-
-func (r *readStopper) Close() error {
- r.mu.Lock()
- alreadyClosed := r.r == nil
- r.r = nil
- r.mu.Unlock()
- if alreadyClosed {
- return nil
- }
- return r.rreq.close()
-}
-
-var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
-var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(bytes.NewReader([]byte{})))
-
-type readSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// seekerFromBody tries to obtain a seekable reader
-// from the given request body.
-func seekerFromBody(r io.ReadCloser) readSeekCloser {
- if r, ok := r.(readSeekCloser); ok {
- return r
- }
- rv := reflect.ValueOf(r)
- if rv.Type() != nopCloserType && rv.Type() != nopCloserWriterToType {
- return nil
- }
- // It's a value created by nopCloser. Extract the
- // underlying Reader. Note that this works
- // because the ioutil.nopCloser type exports
- // its Reader field.
- rs, ok := rv.Field(0).Interface().(io.ReadSeeker)
- if !ok {
- return nil
- }
- return readSeekerWithNopClose{rs}
-}
-
-type readSeekerWithNopClose struct {
- io.ReadSeeker
-}
-
-func (r readSeekerWithNopClose) Close() error {
- return nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go
deleted file mode 100644
index 047ebbad..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/httpbakery/visitor.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package httpbakery
-
-import (
- "context"
- "net/http"
- "net/url"
-
- "gopkg.in/errgo.v1"
- "gopkg.in/httprequest.v1"
-
- "github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
-)
-
-// TODO(rog) rename this file.
-
-// legacyGetInteractionMethods queries a URL as found in an
-// ErrInteractionRequired VisitURL field to find available interaction
-// methods.
-//
-// It does this by sending a GET request to the URL with the Accept
-// header set to "application/json" and parsing the resulting
-// response as a map[string]string.
-//
-// It uses the given Doer to execute the HTTP GET request.
-func legacyGetInteractionMethods(ctx context.Context, logger bakery.Logger, client httprequest.Doer, u *url.URL) map[string]*url.URL {
- methodURLs, err := legacyGetInteractionMethods1(ctx, client, u)
- if err != nil {
- // When a discharger doesn't support retrieving interaction methods,
- // we expect to get an error, because it's probably returning an HTML
- // page not JSON.
- if logger != nil {
- logger.Debugf(ctx, "ignoring error: cannot get interaction methods: %v; %s", err, errgo.Details(err))
- }
- methodURLs = make(map[string]*url.URL)
- }
- if methodURLs["interactive"] == nil {
- // There's no "interactive" method returned, but we know
- // the server does actually support it, because all dischargers
- // are required to, so fill it in with the original URL.
- methodURLs["interactive"] = u
- }
- return methodURLs
-}
-
-func legacyGetInteractionMethods1(ctx context.Context, client httprequest.Doer, u *url.URL) (map[string]*url.URL, error) {
- httpReqClient := &httprequest.Client{
- Doer: client,
- }
- req, err := http.NewRequest("GET", u.String(), nil)
- if err != nil {
- return nil, errgo.Notef(err, "cannot create request")
- }
- req.Header.Set("Accept", "application/json")
- var methodURLStrs map[string]string
- if err := httpReqClient.Do(ctx, req, &methodURLStrs); err != nil {
- return nil, errgo.Mask(err)
- }
- // Make all the URLs relative to the request URL.
- methodURLs := make(map[string]*url.URL)
- for m, urlStr := range methodURLStrs {
- relURL, err := url.Parse(urlStr)
- if err != nil {
- return nil, errgo.Notef(err, "invalid URL for interaction method %q", m)
- }
- methodURLs[m] = u.ResolveReference(relURL)
- }
- return methodURLs, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go b/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go
deleted file mode 100644
index a9431fa6..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroon-bakery/v3/internal/httputil/relativeurl.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2016 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-// Note: this code was copied from github.com/juju/utils.
-
-// Package httputil holds utility functions related to net/http.
-package httputil
-
-import (
- "errors"
- "strings"
-)
-
-// RelativeURLPath returns a relative URL path that is lexically
-// equivalent to targpath when interpreted by url.URL.ResolveReference.
-// On success, the returned path will always be non-empty and relative
-// to basePath, even if basePath and targPath share no elements.
-//
-// It is assumed that both basePath and targPath are normalized
-// (have no . or .. elements).
-//
-// An error is returned if basePath or targPath are not absolute paths.
-func RelativeURLPath(basePath, targPath string) (string, error) {
- if !strings.HasPrefix(basePath, "/") {
- return "", errors.New("non-absolute base URL")
- }
- if !strings.HasPrefix(targPath, "/") {
- return "", errors.New("non-absolute target URL")
- }
- baseParts := strings.Split(basePath, "/")
- targParts := strings.Split(targPath, "/")
-
- // For the purposes of dotdot, the last element of
- // the paths are irrelevant. We save the last part
- // of the target path for later.
- lastElem := targParts[len(targParts)-1]
- baseParts = baseParts[0 : len(baseParts)-1]
- targParts = targParts[0 : len(targParts)-1]
-
- // Find the common prefix between the two paths:
- var i int
- for ; i < len(baseParts); i++ {
- if i >= len(targParts) || baseParts[i] != targParts[i] {
- break
- }
- }
- dotdotCount := len(baseParts) - i
- targOnly := targParts[i:]
- result := make([]string, 0, dotdotCount+len(targOnly)+1)
- for i := 0; i < dotdotCount; i++ {
- result = append(result, "..")
- }
- result = append(result, targOnly...)
- result = append(result, lastElem)
- final := strings.Join(result, "/")
- if final == "" {
- // If the final result is empty, the last element must
- // have been empty, so the target was slash terminated
- // and there were no previous elements, so "."
- // is appropriate.
- final = "."
- }
- return final, nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
deleted file mode 100644
index 67c4fb56..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
+++ /dev/null
@@ -1,187 +0,0 @@
-Copyright © 2014, Roger Peppe, Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md b/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
deleted file mode 100644
index 4d03b8a8..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Macaroon ID Protocol Buffers
-
-This module defines the serialization format of macaroon identifiers for
-macaroons created by the macaroon-bakery. For the most part this encoding
-is considered an internal implementation detail of the macaroon-bakery
-and external applications should not rely on any of the details of this
-encoding being maintained between different bakery versions.
-
-This is broken out into a separate module as the protobuf implementation
-works in such a way that one cannot have multiple definitions of a
-message in any particular application's dependency tree. This module
-therefore provides a common definition for use by multiple versions of
-the macaroon-bakery to facilitate easier migration in client applications.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
deleted file mode 100644
index f7ddc18b..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Package macaroonpb defines the serialization details of macaroon ids
-// used in the macaroon-bakery.
-package macaroonpb
-
-import (
- "github.com/golang/protobuf/proto"
-)
-
-//go:generate protoc --go_out . id.proto
-
-// MarshalBinary implements encoding.BinaryMarshal.
-func (id *MacaroonId) MarshalBinary() ([]byte, error) {
- return proto.Marshal(id)
-}
-
-// UnmarshalBinary implements encoding.UnmarshalBinary.
-func (id *MacaroonId) UnmarshalBinary(data []byte) error {
- return proto.Unmarshal(data, id)
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
deleted file mode 100644
index 41b69d9d..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.23.0
-// protoc v3.12.3
-// source: id.proto
-
-package macaroonpb
-
-import (
- proto "github.com/golang/protobuf/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
-type MacaroonId struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"`
- StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"`
- Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"`
-}
-
-func (x *MacaroonId) Reset() {
- *x = MacaroonId{}
- if protoimpl.UnsafeEnabled {
- mi := &file_id_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MacaroonId) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MacaroonId) ProtoMessage() {}
-
-func (x *MacaroonId) ProtoReflect() protoreflect.Message {
- mi := &file_id_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MacaroonId.ProtoReflect.Descriptor instead.
-func (*MacaroonId) Descriptor() ([]byte, []int) {
- return file_id_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *MacaroonId) GetNonce() []byte {
- if x != nil {
- return x.Nonce
- }
- return nil
-}
-
-func (x *MacaroonId) GetStorageId() []byte {
- if x != nil {
- return x.StorageId
- }
- return nil
-}
-
-func (x *MacaroonId) GetOps() []*Op {
- if x != nil {
- return x.Ops
- }
- return nil
-}
-
-type Op struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
- Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"`
-}
-
-func (x *Op) Reset() {
- *x = Op{}
- if protoimpl.UnsafeEnabled {
- mi := &file_id_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Op) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Op) ProtoMessage() {}
-
-func (x *Op) ProtoReflect() protoreflect.Message {
- mi := &file_id_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Op.ProtoReflect.Descriptor instead.
-func (*Op) Descriptor() ([]byte, []int) {
- return file_id_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Op) GetEntity() string {
- if x != nil {
- return x.Entity
- }
- return ""
-}
-
-func (x *Op) GetActions() []string {
- if x != nil {
- return x.Actions
- }
- return nil
-}
-
-var File_id_proto protoreflect.FileDescriptor
-
-var file_id_proto_rawDesc = []byte{
- 0x0a, 0x08, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0a, 0x4d, 0x61,
- 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1c,
- 0x0a, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x03,
- 0x6f, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4f, 0x70, 0x52, 0x03,
- 0x6f, 0x70, 0x73, 0x22, 0x36, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74,
- 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0e, 0x5a, 0x0c, 0x2e,
- 0x3b, 0x6d, 0x61, 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var (
- file_id_proto_rawDescOnce sync.Once
- file_id_proto_rawDescData = file_id_proto_rawDesc
-)
-
-func file_id_proto_rawDescGZIP() []byte {
- file_id_proto_rawDescOnce.Do(func() {
- file_id_proto_rawDescData = protoimpl.X.CompressGZIP(file_id_proto_rawDescData)
- })
- return file_id_proto_rawDescData
-}
-
-var file_id_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_id_proto_goTypes = []interface{}{
- (*MacaroonId)(nil), // 0: MacaroonId
- (*Op)(nil), // 1: Op
-}
-var file_id_proto_depIdxs = []int32{
- 1, // 0: MacaroonId.ops:type_name -> Op
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_id_proto_init() }
-func file_id_proto_init() {
- if File_id_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_id_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacaroonId); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_id_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Op); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_id_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_id_proto_goTypes,
- DependencyIndexes: file_id_proto_depIdxs,
- MessageInfos: file_id_proto_msgTypes,
- }.Build()
- File_id_proto = out.File
- file_id_proto_rawDesc = nil
- file_id_proto_goTypes = nil
- file_id_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
deleted file mode 100644
index bfe891ee..00000000
--- a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax="proto3";
-
-option go_package = ".;macaroonpb";
-
-message MacaroonId {
- bytes nonce = 1;
- bytes storageId = 2;
- repeated Op ops = 3;
-}
-
-message Op {
- string entity = 1;
- repeated string actions = 2;
-}
diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml
new file mode 100644
index 00000000..841c4281
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/.codecov.yml
@@ -0,0 +1,5 @@
+coverage:
+ status:
+ patch:
+ default:
+ target: 80%
diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes
new file mode 100644
index 00000000..d020be8e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/.gitattributes
@@ -0,0 +1,2 @@
+*.go text eol=lf
+
diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore
new file mode 100644
index 00000000..87c3bd3e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/.gitignore
@@ -0,0 +1,5 @@
+secrets.yml
+coverage.out
+coverage.txt
+*.cov
+.idea
diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE
similarity index 100%
rename from vendor/google.golang.org/appengine/LICENSE
rename to vendor/github.com/go-openapi/analysis/LICENSE
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md
new file mode 100644
index 00000000..e005d4b3
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/README.md
@@ -0,0 +1,27 @@
+# OpenAPI analysis [](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/analysis)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/analysis)
+[](https://goreportcard.com/report/github.com/go-openapi/analysis)
+
+
+A foundational library to analyze an OAI specification document for easier reasoning about the content.
+
+## What's inside?
+
+* An analyzer providing methods to walk the functional content of a specification
+* A spec flattener producing a self-contained document bundle, while preserving `$ref`s
+* A spec merger ("mixin") to merge several spec documents into a primary spec
+* A spec "fixer" ensuring that response descriptions are non empty
+
+[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis)
+
+## FAQ
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go
new file mode 100644
index 00000000..c17aee1b
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/analyzer.go
@@ -0,0 +1,1064 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ slashpath "path"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+type referenceAnalysis struct {
+ schemas map[string]spec.Ref
+ responses map[string]spec.Ref
+ parameters map[string]spec.Ref
+ items map[string]spec.Ref
+ headerItems map[string]spec.Ref
+ parameterItems map[string]spec.Ref
+ allRefs map[string]spec.Ref
+ pathItems map[string]spec.Ref
+}
+
+func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
+ r.allRefs["#"+key] = ref
+}
+
+func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) {
+ r.items["#"+key] = items.Ref
+ r.addRef(key, items.Ref)
+ if location == "header" {
+ // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas
+ // and $ref are not supported here. However it is possible to analyze this.
+ r.headerItems["#"+key] = items.Ref
+ } else {
+ r.parameterItems["#"+key] = items.Ref
+ }
+}
+
+func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
+ r.schemas["#"+key] = ref.Schema.Ref
+ r.addRef(key, ref.Schema.Ref)
+}
+
+func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
+ r.responses["#"+key] = resp.Ref
+ r.addRef(key, resp.Ref)
+}
+
+func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
+ r.parameters["#"+key] = param.Ref
+ r.addRef(key, param.Ref)
+}
+
+func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) {
+ r.pathItems["#"+key] = pathItem.Ref
+ r.addRef(key, pathItem.Ref)
+}
+
+type patternAnalysis struct {
+ parameters map[string]string
+ headers map[string]string
+ items map[string]string
+ schemas map[string]string
+ allPatterns map[string]string
+}
+
+func (p *patternAnalysis) addPattern(key, pattern string) {
+ p.allPatterns["#"+key] = pattern
+}
+
+func (p *patternAnalysis) addParameterPattern(key, pattern string) {
+ p.parameters["#"+key] = pattern
+ p.addPattern(key, pattern)
+}
+
+func (p *patternAnalysis) addHeaderPattern(key, pattern string) {
+ p.headers["#"+key] = pattern
+ p.addPattern(key, pattern)
+}
+
+func (p *patternAnalysis) addItemsPattern(key, pattern string) {
+ p.items["#"+key] = pattern
+ p.addPattern(key, pattern)
+}
+
+func (p *patternAnalysis) addSchemaPattern(key, pattern string) {
+ p.schemas["#"+key] = pattern
+ p.addPattern(key, pattern)
+}
+
+type enumAnalysis struct {
+ parameters map[string][]interface{}
+ headers map[string][]interface{}
+ items map[string][]interface{}
+ schemas map[string][]interface{}
+ allEnums map[string][]interface{}
+}
+
+func (p *enumAnalysis) addEnum(key string, enum []interface{}) {
+ p.allEnums["#"+key] = enum
+}
+
+func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) {
+ p.parameters["#"+key] = enum
+ p.addEnum(key, enum)
+}
+
+func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) {
+ p.headers["#"+key] = enum
+ p.addEnum(key, enum)
+}
+
+func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) {
+ p.items["#"+key] = enum
+ p.addEnum(key, enum)
+}
+
+func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) {
+ p.schemas["#"+key] = enum
+ p.addEnum(key, enum)
+}
+
+// New takes a swagger spec object and returns an analyzed spec document.
+// The analyzed document contains a number of indices that make it easier to
+// reason about semantics of a swagger specification for use in code generation
+// or validation etc.
+func New(doc *spec.Swagger) *Spec {
+ a := &Spec{
+ spec: doc,
+ references: referenceAnalysis{},
+ patterns: patternAnalysis{},
+ enums: enumAnalysis{},
+ }
+ a.reset()
+ a.initialize()
+
+ return a
+}
+
+// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry
+// with a bunch of utility methods to act on the information in the spec.
+type Spec struct {
+ spec *spec.Swagger
+ consumes map[string]struct{}
+ produces map[string]struct{}
+ authSchemes map[string]struct{}
+ operations map[string]map[string]*spec.Operation
+ references referenceAnalysis
+ patterns patternAnalysis
+ enums enumAnalysis
+ allSchemas map[string]SchemaRef
+ allOfs map[string]SchemaRef
+}
+
+func (s *Spec) reset() {
+ s.consumes = make(map[string]struct{}, 150)
+ s.produces = make(map[string]struct{}, 150)
+ s.authSchemes = make(map[string]struct{}, 150)
+ s.operations = make(map[string]map[string]*spec.Operation, 150)
+ s.allSchemas = make(map[string]SchemaRef, 150)
+ s.allOfs = make(map[string]SchemaRef, 150)
+ s.references.schemas = make(map[string]spec.Ref, 150)
+ s.references.pathItems = make(map[string]spec.Ref, 150)
+ s.references.responses = make(map[string]spec.Ref, 150)
+ s.references.parameters = make(map[string]spec.Ref, 150)
+ s.references.items = make(map[string]spec.Ref, 150)
+ s.references.headerItems = make(map[string]spec.Ref, 150)
+ s.references.parameterItems = make(map[string]spec.Ref, 150)
+ s.references.allRefs = make(map[string]spec.Ref, 150)
+ s.patterns.parameters = make(map[string]string, 150)
+ s.patterns.headers = make(map[string]string, 150)
+ s.patterns.items = make(map[string]string, 150)
+ s.patterns.schemas = make(map[string]string, 150)
+ s.patterns.allPatterns = make(map[string]string, 150)
+ s.enums.parameters = make(map[string][]interface{}, 150)
+ s.enums.headers = make(map[string][]interface{}, 150)
+ s.enums.items = make(map[string][]interface{}, 150)
+ s.enums.schemas = make(map[string][]interface{}, 150)
+ s.enums.allEnums = make(map[string][]interface{}, 150)
+}
+
+func (s *Spec) reload() {
+ s.reset()
+ s.initialize()
+}
+
+func (s *Spec) initialize() { // populates every index by walking the whole swagger document once
+ for _, c := range s.spec.Consumes { // spec-level consumes
+ s.consumes[c] = struct{}{}
+ }
+ for _, c := range s.spec.Produces { // spec-level produces
+ s.produces[c] = struct{}{}
+ }
+ for _, ss := range s.spec.Security { // spec-level security requirements: only the scheme names are indexed
+ for k := range ss {
+ s.authSchemes[k] = struct{}{}
+ }
+ }
+ for path, pathItem := range s.AllPaths() { // every path item and its operations
+ s.analyzeOperations(path, &pathItem) //#nosec
+ }
+
+ for name, parameter := range s.spec.Parameters { // shared parameters declared under /parameters
+ refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
+ if parameter.Items != nil {
+ s.analyzeItems("items", parameter.Items, refPref, "parameter")
+ }
+ if parameter.In == "body" && parameter.Schema != nil { // only body parameters carry a schema
+ s.analyzeSchema("schema", parameter.Schema, refPref)
+ }
+ if parameter.Pattern != "" {
+ s.patterns.addParameterPattern(refPref, parameter.Pattern)
+ }
+ if len(parameter.Enum) > 0 {
+ s.enums.addParameterEnum(refPref, parameter.Enum)
+ }
+ }
+
+ for name, response := range s.spec.Responses { // shared responses declared under /responses
+ refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
+ for k, v := range response.Headers {
+ hRefPref := slashpath.Join(refPref, "headers", k)
+ if v.Items != nil {
+ s.analyzeItems("items", v.Items, hRefPref, "header")
+ }
+ if v.Pattern != "" {
+ s.patterns.addHeaderPattern(hRefPref, v.Pattern)
+ }
+ if len(v.Enum) > 0 {
+ s.enums.addHeaderEnum(hRefPref, v.Enum)
+ }
+ }
+ if response.Schema != nil {
+ s.analyzeSchema("schema", response.Schema, refPref)
+ }
+ }
+
+ for name := range s.spec.Definitions { // top-level definitions
+ schema := s.spec.Definitions[name] // copy so taking its address below is safe
+ s.analyzeSchema(name, &schema, "/definitions")
+ }
+ // TODO: after analyzing all things and flattening schemas etc
+ // resolve all the collected references to their final representations
+ // best put in a separate method because this could get expensive
+}
+
+func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { // indexes one path item: its $ref, each HTTP operation, and the shared path-level parameters
+ // TODO: resolve refs here?
+ // Currently, operations declared via pathItem $ref are known only after expansion
+ op := pi // the path item itself: a $ref (if any) is recorded below, not resolved
+ if pi.Ref.String() != "" {
+ key := slashpath.Join("/paths", jsonpointer.Escape(path))
+ s.references.addPathItemRef(key, pi)
+ }
+ s.analyzeOperation("GET", path, op.Get)
+ s.analyzeOperation("PUT", path, op.Put)
+ s.analyzeOperation("POST", path, op.Post)
+ s.analyzeOperation("PATCH", path, op.Patch)
+ s.analyzeOperation("DELETE", path, op.Delete)
+ s.analyzeOperation("HEAD", path, op.Head)
+ s.analyzeOperation("OPTIONS", path, op.Options)
+ for i, param := range op.Parameters { // path-level parameters, shared by all the operations above
+ refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
+ if param.Ref.String() != "" {
+ s.references.addParamRef(refPref, &param) //#nosec
+ }
+ if param.Pattern != "" {
+ s.patterns.addParameterPattern(refPref, param.Pattern)
+ }
+ if len(param.Enum) > 0 {
+ s.enums.addParameterEnum(refPref, param.Enum)
+ }
+ if param.Items != nil {
+ s.analyzeItems("items", param.Items, refPref, "parameter")
+ }
+ if param.Schema != nil {
+ s.analyzeSchema("schema", param.Schema, refPref)
+ }
+ }
+}
+
+func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { // nil-safe: indexes refs, patterns and enums on simple-schema items, recursively
+ if items == nil {
+ return
+ }
+ refPref := slashpath.Join(prefix, name)
+ s.analyzeItems(name, items.Items, refPref, location) // nested items first (recursion stops at the nil guard above)
+ if items.Ref.String() != "" {
+ s.references.addItemsRef(refPref, items, location)
+ }
+ if items.Pattern != "" {
+ s.patterns.addItemsPattern(refPref, items.Pattern)
+ }
+ if len(items.Enum) > 0 {
+ s.enums.addItemsEnum(refPref, items.Enum)
+ }
+}
+
+func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) {
+ refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
+ if param.Ref.String() != "" {
+ s.references.addParamRef(refPref, ¶m) //#nosec
+ }
+
+ if param.Pattern != "" {
+ s.patterns.addParameterPattern(refPref, param.Pattern)
+ }
+
+ if len(param.Enum) > 0 {
+ s.enums.addParameterEnum(refPref, param.Enum)
+ }
+
+ s.analyzeItems("items", param.Items, refPref, "parameter")
+ if param.In == "body" && param.Schema != nil {
+ s.analyzeSchema("schema", param.Schema, refPref)
+ }
+}
+
+func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { // nil-safe: indexes one operation's media types, security schemes, parameters and responses
+ if op == nil {
+ return
+ }
+
+ for _, c := range op.Consumes { // operation-level consumes are added to the global set
+ s.consumes[c] = struct{}{}
+ }
+
+ for _, c := range op.Produces { // operation-level produces are added to the global set
+ s.produces[c] = struct{}{}
+ }
+
+ for _, ss := range op.Security { // only the scheme names are indexed here
+ for k := range ss {
+ s.authSchemes[k] = struct{}{}
+ }
+ }
+
+ if _, ok := s.operations[method]; !ok { // lazily create the per-method bucket
+ s.operations[method] = make(map[string]*spec.Operation)
+ }
+
+ s.operations[method][path] = op
+ prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
+ for i, param := range op.Parameters { // operation-level parameters
+ s.analyzeParameter(prefix, i, param)
+ }
+
+ if op.Responses == nil {
+ return
+ }
+
+ if op.Responses.Default != nil { // the default response has no status code, hence a dedicated path
+ s.analyzeDefaultResponse(prefix, op.Responses.Default)
+ }
+
+ for k, res := range op.Responses.StatusCodeResponses {
+ s.analyzeResponse(prefix, k, res)
+ }
+}
+
+func (s *Spec) analyzeDefaultResponse(prefix string, res *spec.Response) { // like analyzeResponse, but anchored at "responses/default"
+ refPref := slashpath.Join(prefix, "responses", "default")
+ if res.Ref.String() != "" {
+ s.references.addResponseRef(refPref, res)
+ }
+
+ for k, v := range res.Headers {
+ hRefPref := slashpath.Join(refPref, "headers", k)
+ s.analyzeItems("items", v.Items, hRefPref, "header")
+ if v.Pattern != "" {
+ s.patterns.addHeaderPattern(hRefPref, v.Pattern)
+ }
+ } // NOTE(review): unlike analyzeResponse, header enums are not indexed here — confirm upstream intent
+
+ if res.Schema != nil {
+ s.analyzeSchema("schema", res.Schema, refPref)
+ }
+}
+
+func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { // indexes one status-code response: its $ref, header items/patterns/enums, and schema
+ refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
+ if res.Ref.String() != "" {
+ s.references.addResponseRef(refPref, &res) //#nosec
+ }
+
+ for name, v := range res.Headers { // renamed from k: the header name previously shadowed the status-code parameter k
+ hRefPref := slashpath.Join(refPref, "headers", name)
+ s.analyzeItems("items", v.Items, hRefPref, "header")
+ if v.Pattern != "" {
+ s.patterns.addHeaderPattern(hRefPref, v.Pattern)
+ }
+
+ if len(v.Enum) > 0 {
+ s.enums.addHeaderEnum(hRefPref, v.Enum)
+ }
+ }
+
+ if res.Schema != nil {
+ s.analyzeSchema("schema", res.Schema, refPref)
+ }
+}
+
+func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { // indexes the schema at prefix/name and recurses into every sub-schema construct
+ refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
+ schRef := SchemaRef{
+ Name: name,
+ Schema: schema,
+ Ref: spec.MustCreateRef("#" + refURI), // internal json pointer to this schema's location
+ TopLevel: prefix == "/definitions", // true only for direct members of /definitions
+ }
+
+ s.allSchemas["#"+refURI] = schRef // every visited schema is indexed, referenced or not
+
+ if schema.Ref.String() != "" {
+ s.references.addSchemaRef(refURI, schRef)
+ }
+
+ if schema.Pattern != "" {
+ s.patterns.addSchemaPattern(refURI, schema.Pattern)
+ }
+
+ if len(schema.Enum) > 0 {
+ s.enums.addSchemaEnum(refURI, schema.Enum)
+ }
+
+ for k, v := range schema.Definitions { // nested definitions
+ v := v // capture: safe to take the address of the copy
+ s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions"))
+ }
+
+ for k, v := range schema.Properties {
+ v := v // capture: safe to take the address of the copy
+ s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties"))
+ }
+
+ for k, v := range schema.PatternProperties {
+ v := v // capture: safe to take the address of the copy
+ // NOTE: swagger 2.0 does not support PatternProperties.
+ // However it is possible to analyze this in a schema
+ s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties"))
+ }
+
+ for i := range schema.AllOf { // allOf members are indexed by position
+ v := &schema.AllOf[i]
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
+ }
+
+ if len(schema.AllOf) > 0 {
+ s.allOfs["#"+refURI] = schRef // remember composed (allOf) schemas, e.g. for flattening
+ }
+
+ for i := range schema.AnyOf {
+ v := &schema.AnyOf[i]
+ // NOTE: swagger 2.0 does not support anyOf constructs.
+ // However it is possible to analyze this in a schema
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
+ }
+
+ for i := range schema.OneOf {
+ v := &schema.OneOf[i]
+ // NOTE: swagger 2.0 does not support oneOf constructs.
+ // However it is possible to analyze this in a schema
+ s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
+ }
+
+ if schema.Not != nil {
+ // NOTE: swagger 2.0 does not support "not" constructs.
+ // However it is possible to analyze this in a schema
+ s.analyzeSchema("not", schema.Not, refURI)
+ }
+
+ if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
+ s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI)
+ }
+
+ if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
+ // NOTE: swagger 2.0 does not support AdditionalItems.
+ // However it is possible to analyze this in a schema
+ s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI)
+ }
+
+ if schema.Items != nil {
+ if schema.Items.Schema != nil { // single-schema items
+ s.analyzeSchema("items", schema.Items.Schema, refURI)
+ }
+
+ for i := range schema.Items.Schemas { // tuple-style items, indexed by position
+ sch := &schema.Items.Schemas[i]
+ s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
+ }
+ }
+}
+
+// SecurityRequirement is a representation of a security requirement for an operation
+type SecurityRequirement struct {
+ Name string // name of the security scheme; empty for the anonymous (no-auth) requirement
+ Scopes []string // scopes attached to the requirement; empty (never nil) for non-scoped schemes
+}
+
+// SecurityRequirementsFor gets the security requirements for the operation
+func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement {
+ if s.spec.Security == nil && operation.Security == nil {
+ return nil
+ }
+
+ schemes := s.spec.Security
+ if operation.Security != nil { // operation-level security replaces (does not merge with) spec-level security
+ schemes = operation.Security
+ }
+
+ result := [][]SecurityRequirement{}
+ for _, scheme := range schemes { // outer slice: alternative requirements; inner slice: schemes required together
+ if len(scheme) == 0 {
+ // append a zero object for anonymous
+ result = append(result, []SecurityRequirement{{}})
+
+ continue
+ }
+
+ var reqs []SecurityRequirement
+ for k, v := range scheme {
+ if v == nil { // normalize nil scope lists to empty slices
+ v = []string{}
+ }
+ reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v})
+ }
+
+ result = append(result, reqs)
+ }
+
+ return result
+}
+
+// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements
+func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme {
+ result := make(map[string]spec.SecurityScheme)
+
+ for _, v := range requirements {
+ if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
+ if definition != nil {
+ result[v.Name] = *definition
+ }
+ }
+ }
+
+ return result
+}
+
+// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
+func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
+ requirements := s.SecurityRequirementsFor(operation)
+ if len(requirements) == 0 {
+ return nil
+ }
+
+ result := make(map[string]spec.SecurityScheme)
+ for _, reqs := range requirements {
+ for _, v := range reqs {
+ if v.Name == "" {
+ // optional requirement
+ continue
+ }
+
+ if _, ok := result[v.Name]; ok {
+ // duplicate requirement
+ continue
+ }
+
+ if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
+ if definition != nil {
+ result[v.Name] = *definition
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// ConsumesFor gets the mediatypes for the operation
+func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
+ if len(operation.Consumes) == 0 {
+ cons := make(map[string]struct{}, len(s.spec.Consumes))
+ for _, k := range s.spec.Consumes {
+ cons[k] = struct{}{}
+ }
+
+ return s.structMapKeys(cons)
+ }
+
+ cons := make(map[string]struct{}, len(operation.Consumes))
+ for _, c := range operation.Consumes {
+ cons[c] = struct{}{}
+ }
+
+ return s.structMapKeys(cons)
+}
+
+// ProducesFor gets the mediatypes for the operation
+func (s *Spec) ProducesFor(operation *spec.Operation) []string {
+ if len(operation.Produces) == 0 {
+ prod := make(map[string]struct{}, len(s.spec.Produces))
+ for _, k := range s.spec.Produces {
+ prod[k] = struct{}{}
+ }
+
+ return s.structMapKeys(prod)
+ }
+
+ prod := make(map[string]struct{}, len(operation.Produces))
+ for _, c := range operation.Produces {
+ prod[c] = struct{}{}
+ }
+
+ return s.structMapKeys(prod)
+}
+
+func mapKeyFromParam(param *spec.Parameter) string {
+ return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
+}
+
+func fieldNameFromParam(param *spec.Parameter) string {
+ // TODO: this should be x-go-name
+ if nm, ok := param.Extensions.GetString("go-name"); ok {
+ return nm
+ }
+
+ return swag.ToGoName(param.Name)
+}
+
+// ErrorOnParamFunc is a callback function to be invoked
+// whenever an error is encountered while resolving references
+// on parameters.
+//
+// This function takes as input the spec.Parameter which triggered the
+// error and the error itself.
+//
+// If the callback function returns false, the calling function should bail.
+//
+// If it returns true, the calling function should continue evaluating parameters.
+// A nil ErrorOnParamFunc must be evaluated as equivalent to panic().
+type ErrorOnParamFunc func(spec.Parameter, error) bool
+
+func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { // indexes parameters into res keyed by "in#GoFieldName", resolving $ref entries against the root document
+ for _, param := range parameters {
+ pr := param
+ if pr.Ref.String() == "" { // plain parameter: index it directly
+ res[mapKeyFromParam(&pr)] = pr
+
+ continue
+ }
+
+ // resolve $ref
+ if callmeOnError == nil { // a nil callback means: panic on the first resolution error
+ callmeOnError = func(_ spec.Parameter, err error) bool {
+ panic(err)
+ }
+ }
+
+ obj, _, err := pr.Ref.GetPointer().Get(s.spec)
+ if err != nil {
+ if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) {
+ continue // callback asked to keep evaluating the remaining parameters
+ }
+
+ break // callback asked to bail out
+ }
+
+ objAsParam, ok := obj.(spec.Parameter)
+ if !ok {
+ if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) {
+ continue // callback asked to keep evaluating the remaining parameters
+ }
+
+ break // callback asked to bail out
+ }
+
+ pr = objAsParam
+ res[mapKeyFromParam(&pr)] = pr
+ }
+}
+
+// ParametersFor the specified operation id.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
+ return s.SafeParametersFor(operationID, nil)
+}
+
+// SafeParametersFor the specified operation id.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invoke a ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter {
+ gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
+ bag := make(map[string]spec.Parameter)
+ s.paramsAsMap(pi.Parameters, bag, callmeOnError)
+ s.paramsAsMap(op.Parameters, bag, callmeOnError)
+
+ var res []spec.Parameter
+ for _, v := range bag {
+ res = append(res, v)
+ }
+
+ return res
+ }
+
+ for _, pi := range s.spec.Paths.Paths {
+ if pi.Get != nil && pi.Get.ID == operationID {
+ return gatherParams(&pi, pi.Get) //#nosec
+ }
+ if pi.Head != nil && pi.Head.ID == operationID {
+ return gatherParams(&pi, pi.Head) //#nosec
+ }
+ if pi.Options != nil && pi.Options.ID == operationID {
+ return gatherParams(&pi, pi.Options) //#nosec
+ }
+ if pi.Post != nil && pi.Post.ID == operationID {
+ return gatherParams(&pi, pi.Post) //#nosec
+ }
+ if pi.Patch != nil && pi.Patch.ID == operationID {
+ return gatherParams(&pi, pi.Patch) //#nosec
+ }
+ if pi.Put != nil && pi.Put.ID == operationID {
+ return gatherParams(&pi, pi.Put) //#nosec
+ }
+ if pi.Delete != nil && pi.Delete.ID == operationID {
+ return gatherParams(&pi, pi.Delete) //#nosec
+ }
+ }
+
+ return nil
+}
+
+// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Assumes parameters properly resolve references if any and that
+// such references actually resolve to a parameter object.
+// Otherwise, panics.
+func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
+ return s.SafeParamsFor(method, path, nil)
+}
+
+// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
+// apply for the method and path.
+//
+// Does not assume parameters properly resolve references or that
+// such references actually resolve to a parameter object.
+//
+// Upon error, invoke a ErrorOnParamFunc callback with the erroneous
+// parameters. If the callback is set to nil, panics upon errors.
+func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter {
+ res := make(map[string]spec.Parameter)
+ if pi, ok := s.spec.Paths.Paths[path]; ok {
+ s.paramsAsMap(pi.Parameters, res, callmeOnError)
+ s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError)
+ }
+
+ return res
+}
+
+// OperationForName gets the operation for the given id
+func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
+ for method, pathItem := range s.operations {
+ for path, op := range pathItem {
+ if operationID == op.ID {
+ return method, path, op, true
+ }
+ }
+ }
+
+ return "", "", nil, false
+}
+
+// OperationFor the given method and path
+func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
+ if mp, ok := s.operations[strings.ToUpper(method)]; ok {
+ op, fn := mp[path]
+
+ return op, fn
+ }
+
+ return nil, false
+}
+
+// Operations gathers all the operations specified in the spec document
+func (s *Spec) Operations() map[string]map[string]*spec.Operation {
+ return s.operations
+}
+
+func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
+ if len(mp) == 0 {
+ return nil
+ }
+
+ result := make([]string, 0, len(mp))
+ for k := range mp {
+ result = append(result, k)
+ }
+
+ return result
+}
+
+// AllPaths returns all the paths in the swagger spec
+func (s *Spec) AllPaths() map[string]spec.PathItem {
+ if s.spec == nil || s.spec.Paths == nil {
+ return nil
+ }
+
+ return s.spec.Paths.Paths
+}
+
+// OperationIDs gets all the operation ids based on method and path (falling back to "METHOD path" when no id is set)
+func (s *Spec) OperationIDs() []string {
+ if len(s.operations) == 0 {
+ return nil
+ }
+
+ result := make([]string, 0, len(s.operations))
+ for method, v := range s.operations {
+ for p, o := range v {
+ if o.ID != "" {
+ result = append(result, o.ID)
+ } else { // no explicit id: synthesize one from the method and path
+ result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+ }
+ }
+ }
+
+ return result
+}
+
+// OperationMethodPaths gets all the "METHOD path" pairs for the operations found in the spec
+func (s *Spec) OperationMethodPaths() []string {
+ if len(s.operations) == 0 {
+ return nil
+ }
+
+ result := make([]string, 0, len(s.operations))
+ for method, v := range s.operations {
+ for p := range v {
+ result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
+ }
+ }
+
+ return result
+}
+
+// RequiredConsumes gets all the distinct consumes that are specified in the specification document
+func (s *Spec) RequiredConsumes() []string {
+ return s.structMapKeys(s.consumes)
+}
+
+// RequiredProduces gets all the distinct produces that are specified in the specification document
+func (s *Spec) RequiredProduces() []string {
+ return s.structMapKeys(s.produces)
+}
+
+// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
+func (s *Spec) RequiredSecuritySchemes() []string {
+ return s.structMapKeys(s.authSchemes)
+}
+
+// SchemaRef is a reference to a schema
+type SchemaRef struct {
+ Name string // property or definition name of the schema
+ Ref spec.Ref // internal json pointer to the schema's location in the document
+ Schema *spec.Schema // the schema itself
+ TopLevel bool // true when the schema is declared directly under /definitions
+}
+
+// SchemasWithAllOf returns schema references to all schemas that are defined
+// with an allOf key
+func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
+ for _, v := range s.allOfs {
+ result = append(result, v)
+ }
+
+ return
+}
+
+// AllDefinitions returns schema references for all the definitions that were discovered
+func (s *Spec) AllDefinitions() (result []SchemaRef) {
+ for _, v := range s.allSchemas {
+ result = append(result, v)
+ }
+
+ return
+}
+
+// AllDefinitionReferences returns json refs for all the discovered schemas
+func (s *Spec) AllDefinitionReferences() (result []string) {
+ for _, v := range s.references.schemas {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllParameterReferences returns json refs for all the discovered parameters
+func (s *Spec) AllParameterReferences() (result []string) {
+ for _, v := range s.references.parameters {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllResponseReferences returns json refs for all the discovered responses
+func (s *Spec) AllResponseReferences() (result []string) {
+ for _, v := range s.references.responses {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllPathItemReferences returns the references for all the items
+func (s *Spec) AllPathItemReferences() (result []string) {
+ for _, v := range s.references.pathItems {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers).
+//
+// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid
+// Swagger 2.0 spec.
+func (s *Spec) AllItemsReferences() (result []string) {
+ for _, v := range s.references.items {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllReferences returns all the references found in the document, with possible duplicates
+func (s *Spec) AllReferences() (result []string) {
+ for _, v := range s.references.allRefs {
+ result = append(result, v.String())
+ }
+
+ return
+}
+
+// AllRefs returns all the unique references found in the document
+func (s *Spec) AllRefs() (result []spec.Ref) {
+ set := make(map[string]struct{})
+ for _, v := range s.references.allRefs {
+ a := v.String()
+ if a == "" {
+ continue
+ }
+
+ if _, ok := set[a]; !ok {
+ set[a] = struct{}{}
+ result = append(result, v)
+ }
+ }
+
+ return
+}
+
+func cloneStringMap(source map[string]string) map[string]string { // returns a shallow, independent copy of source
+ res := make(map[string]string, len(source))
+ for k, v := range source {
+ res[k] = v
+ }
+
+ return res
+}
+
+func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { // copies the map; NOTE: the enum slices themselves remain shared with the source
+ res := make(map[string][]interface{}, len(source))
+ for k, v := range source {
+ res[k] = v
+ }
+
+ return res
+}
+
+// ParameterPatterns returns all the patterns found in parameters
+// the map is cloned to avoid accidental changes
+func (s *Spec) ParameterPatterns() map[string]string {
+ return cloneStringMap(s.patterns.parameters)
+}
+
+// HeaderPatterns returns all the patterns found in response headers
+// the map is cloned to avoid accidental changes
+func (s *Spec) HeaderPatterns() map[string]string {
+ return cloneStringMap(s.patterns.headers)
+}
+
+// ItemsPatterns returns all the patterns found in simple array items
+// the map is cloned to avoid accidental changes
+func (s *Spec) ItemsPatterns() map[string]string {
+ return cloneStringMap(s.patterns.items)
+}
+
+// SchemaPatterns returns all the patterns found in schemas
+// the map is cloned to avoid accidental changes
+func (s *Spec) SchemaPatterns() map[string]string {
+ return cloneStringMap(s.patterns.schemas)
+}
+
+// AllPatterns returns all the patterns found in the spec
+// the map is cloned to avoid accidental changes
+func (s *Spec) AllPatterns() map[string]string {
+ return cloneStringMap(s.patterns.allPatterns)
+}
+
+// ParameterEnums returns all the enums found in parameters
+// the map is cloned to avoid accidental changes
+func (s *Spec) ParameterEnums() map[string][]interface{} {
+ return cloneEnumMap(s.enums.parameters)
+}
+
+// HeaderEnums returns all the enums found in response headers
+// the map is cloned to avoid accidental changes
+func (s *Spec) HeaderEnums() map[string][]interface{} {
+ return cloneEnumMap(s.enums.headers)
+}
+
+// ItemsEnums returns all the enums found in simple array items
+// the map is cloned to avoid accidental changes
+func (s *Spec) ItemsEnums() map[string][]interface{} {
+ return cloneEnumMap(s.enums.items)
+}
+
+// SchemaEnums returns all the enums found in schemas
+// the map is cloned to avoid accidental changes
+func (s *Spec) SchemaEnums() map[string][]interface{} {
+ return cloneEnumMap(s.enums.schemas)
+}
+
+// AllEnums returns all the enums found in the spec
+// the map is cloned to avoid accidental changes
+func (s *Spec) AllEnums() map[string][]interface{} {
+ return cloneEnumMap(s.enums.allEnums)
+}
diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go
new file mode 100644
index 00000000..33c15704
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/debug.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "os"
+
+ "github.com/go-openapi/analysis/internal/debug"
+)
+
+var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") // debug logging is enabled whenever SWAGGER_DEBUG is set to any non-empty value
diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go
new file mode 100644
index 00000000..e8d9f9b1
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/doc.go
@@ -0,0 +1,43 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package analysis provides methods to work with a Swagger specification document from
+package go-openapi/spec.
+
+## Analyzing a specification
+
+An analysed specification object (type Spec) provides methods to work with swagger definition.
+
+## Flattening or expanding a specification
+
+Flattening a specification bundles all remote $ref in the main spec document.
+Depending on flattening options, additional preprocessing may take place:
+ - full flattening: replacing all inline complex constructs by a named entry in #/definitions
+ - expand: replace all $ref's in the document by their expanded content
+
+## Merging several specifications
+
+Mixing in several specifications merges all Swagger constructs and warns about any conflicts found.
+
+## Fixing a specification
+
+Unmarshalling a specification with golang json unmarshalling may lead to
+some unwanted result on present but empty fields.
+
+## Analyzing a Swagger schema
+
+Swagger schemas are analyzed to determine their complexity and qualify their content.
+*/
+package analysis
diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go
new file mode 100644
index 00000000..7c2ca084
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/fixer.go
@@ -0,0 +1,79 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import "github.com/go-openapi/spec"
+
+// FixEmptyResponseDescriptions replaces empty ("") response
+// descriptions in the input with "(empty)" to ensure that the
+// resulting Swagger stays valid. The problem appears to arise
+// from reading in valid specs that have an explicit response
+// description of "" (valid, response.description is required), but
+// due to zero values being omitted upon re-serializing (omitempty) we
+// lose them unless we stick some chars in there.
+func FixEmptyResponseDescriptions(s *spec.Swagger) {
+ for k, v := range s.Responses { // shared spec-level responses
+ FixEmptyDesc(&v) //#nosec
+ s.Responses[k] = v // write the (possibly fixed) copy back into the map
+ }
+
+ if s.Paths == nil {
+ return
+ }
+
+ for _, v := range s.Paths.Paths { // fix the responses of every operation on every path
+ if v.Get != nil {
+ FixEmptyDescs(v.Get.Responses)
+ }
+ if v.Put != nil {
+ FixEmptyDescs(v.Put.Responses)
+ }
+ if v.Post != nil {
+ FixEmptyDescs(v.Post.Responses)
+ }
+ if v.Delete != nil {
+ FixEmptyDescs(v.Delete.Responses)
+ }
+ if v.Options != nil {
+ FixEmptyDescs(v.Options.Responses)
+ }
+ if v.Head != nil {
+ FixEmptyDescs(v.Head.Responses)
+ }
+ if v.Patch != nil {
+ FixEmptyDescs(v.Patch.Responses)
+ }
+ }
+}
+
+// FixEmptyDescs adds "(empty)" as the description for any Response in
+// the given Responses object that doesn't already have one.
+func FixEmptyDescs(rs *spec.Responses) {
+ FixEmptyDesc(rs.Default)
+ for k, v := range rs.StatusCodeResponses {
+ FixEmptyDesc(&v) //#nosec
+ rs.StatusCodeResponses[k] = v
+ }
+}
+
+// FixEmptyDesc adds "(empty)" as the description to the given
+// Response object if it doesn't already have one and isn't a
+// ref. No-op on nil input.
+func FixEmptyDesc(rs *spec.Response) {
+ if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil {
+ return
+ }
+ rs.Description = "(empty)"
+}
diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go
new file mode 100644
index 00000000..ebedcc9d
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten.go
@@ -0,0 +1,814 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ "log"
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/analysis/internal/flatten/normalize"
+ "github.com/go-openapi/analysis/internal/flatten/operations"
+ "github.com/go-openapi/analysis/internal/flatten/replace"
+ "github.com/go-openapi/analysis/internal/flatten/schutils"
+ "github.com/go-openapi/analysis/internal/flatten/sortref"
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+)
+
// definitionsPath is the JSON pointer prefix under which named schema
// definitions live in a swagger document.
const definitionsPath = "#/definitions"
+
+// newRef stores information about refs created during the flattening process
// newRef stores information about refs created during the flattening process
type newRef struct {
	key      string       // original JSON pointer key of the rewritten $ref
	newName  string       // generated definition name for the target
	path     string       // new local target, e.g. "#/definitions/<newName>"
	isOAIGen bool         // true when the name was mangled to resolve a conflict
	resolved bool         // true once the naming conflict has been resolved
	schema   *spec.Schema // resolved schema held by the new definition
	parents  []string     // keys of $ref's that point to this definition
}
+
+// context stores intermediary results from flatten
// context stores intermediary results from flatten
type context struct {
	newRefs  map[string]*newRef // definitions created while importing/naming refs
	warnings []string           // accumulated non-fatal notifications
	resolved map[string]string  // remote $ref string -> already-imported local name
}
+
// newContext builds an empty flattening context.
// Initial capacities are heuristics sized for typical specs, not limits.
func newContext() *context {
	return &context{
		newRefs:  make(map[string]*newRef, 150),
		warnings: make([]string, 0),
		resolved: make(map[string]string, 50),
	}
}
+
+// Flatten an analyzed spec and produce a self-contained spec bundle.
+//
+// There is a minimal and a full flattening mode.
+//
+// Minimally flattening a spec means:
+// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
+// unscathed)
+// - Importing external (http, file) references so they become internal to the document
+// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
+// like "$ref": "#/definitions/myObject/allOfs/1")
+//
+// A minimally flattened spec thus guarantees the following properties:
+// - all $refs point to a local definition (i.e. '#/definitions/...')
+// - definitions are unique
+//
+// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
+// represent a complex schema or express commonality in the spec.
+// Otherwise, they are simply expanded.
+// Self-referencing JSON pointers cannot resolve to a type and trigger an error.
+//
+// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
+//
+// Fully flattening a spec means:
+// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
+//
+// By complex, we mean every JSON object with some properties.
+// Arrays, when they do not define a tuple,
+// or empty objects with or without additionalProperties, are not considered complex and remain inline.
+//
+// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions
+// have been created.
+//
+// Available flattening options:
+// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
+// - Expand: expand all $ref's in the document (inoperant if Minimal set to true)
+// - Verbose: croaks about name conflicts detected
+// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
+//
+// NOTE: expansion removes all $ref save circular $ref, which remain in place
+//
+// TODO: additional options
// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parents have set a
+// x-go-name extension
+// - LiftAllOfs:
+// - limit the flattening of allOf members when simple objects
+// - merge allOf with validation only
+// - merge allOf with extensions only
+// - ...
func Flatten(opts FlattenOpts) error {
	debugLog("FlattenOpts: %#v", opts)

	// fresh tracking context for this run (opts is passed by value)
	opts.flattenContext = newContext()

	// 1. Recursively expand responses, parameters, path items and items in simple schemas.
	//
	// This simplifies the spec and leaves only the $ref's in schema objects.
	if err := expand(&opts); err != nil {
		return err
	}

	// 2. Strip the current document from absolute $ref's that are actually in the root,
	// so we can recognize them as proper definitions
	//
	// In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped
	if err := normalizeRef(&opts); err != nil {
		return err
	}

	// 3. Optionally remove shared parameters and responses already expanded (now unused).
	//
	// Operation parameters (i.e. under paths) remain.
	if opts.RemoveUnused {
		removeUnusedShared(&opts)
	}

	// 4. Import all remote references.
	if err := importReferences(&opts); err != nil {
		return err
	}

	// 5. full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps)
	if !opts.Minimal && !opts.Expand {
		if err := nameInlinedSchemas(&opts); err != nil {
			return err
		}
	}

	// 6. Rewrite JSON pointers other than $ref to named definitions
	// and attempt to resolve conflicting names whenever possible.
	if err := stripPointersAndOAIGen(&opts); err != nil {
		return err
	}

	// 7. Strip the spec from unused definitions
	if opts.RemoveUnused {
		removeUnused(&opts)
	}

	// 8. Issue warning notifications, if any
	opts.croak()

	// TODO: simplify known schema patterns to flat objects with properties
	// examples:
	//  - lift simple allOf object,
	//  - empty allOf with validation only or extensions only
	//  - rework allOf arrays
	//  - rework allOf additionalProperties

	return nil
}
+
// expand runs spec expansion over the working document. Schemas are skipped
// unless opts.Expand is set (ExpandOpts(!opts.Expand) toggles SkipSchemas).
func expand(opts *FlattenOpts) error {
	if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil {
		return err
	}

	opts.Spec.reload() // re-analyze

	return nil
}
+
+// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76:
+// leading absolute file in $ref is stripped
// normalizeRef strips the current file from any absolute file $ref. This works around issue go-openapi/spec#76:
// leading absolute file in $ref is stripped
func normalizeRef(opts *FlattenOpts) error {
	debugLog("normalizeRef")

	altered := false
	for k, w := range opts.Spec.references.allRefs {
		// only $refs that point back into this document's own definitions
		if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS
			continue
		}

		altered = true
		debugLog("stripping absolute path for: %s", w.String())

		// strip the base path from definition
		if err := replace.UpdateRef(opts.Swagger(), k,
			spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil {
			return err
		}
	}

	// only pay the re-analysis cost if something changed
	if altered {
		opts.Spec.reload() // re-analyze
	}

	return nil
}
+
// removeUnusedShared drops the top-level shared parameters and responses
// sections (already inlined by expansion) and re-analyzes the spec.
func removeUnusedShared(opts *FlattenOpts) {
	opts.Swagger().Parameters = nil
	opts.Swagger().Responses = nil

	opts.Spec.reload() // re-analyze
}
+
// importReferences repeatedly imports remote $ref's until a pass finds none
// left (importExternalReferences reports completion via its bool result).
func importReferences(opts *FlattenOpts) error {
	var (
		imported bool
		err      error
	)

	for !imported && err == nil {
		// iteratively import remote references until none left.
		// This inlining deals with name conflicts by introducing auto-generated names ("OAIGen")
		imported, err = importExternalReferences(opts)

		opts.Spec.reload() // re-analyze
	}

	return err
}
+
+// nameInlinedSchemas replaces every complex inline construct by a named definition.
// nameInlinedSchemas replaces every complex inline construct by a named definition.
func nameInlinedSchemas(opts *FlattenOpts) error {
	debugLog("nameInlinedSchemas")

	namer := &InlineSchemaNamer{
		Spec:           opts.Swagger(),
		Operations:     operations.AllOpRefsByRef(opts.Spec, nil),
		flattenContext: opts.flattenContext,
		opts:           opts,
	}

	// process schemas depth-first so children are named before their parents
	depthFirst := sortref.DepthFirst(opts.Spec.allSchemas)
	for _, key := range depthFirst {
		sch := opts.Spec.allSchemas[key]
		// skip $ref's and schemas already at the top level
		if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel {
			continue
		}

		asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
		if err != nil {
			return fmt.Errorf("schema analysis [%s]: %w", key, err)
		}

		if asch.isAnalyzedAsComplex() { // move complex schemas to definitions
			if err := namer.Name(key, sch.Schema, asch); err != nil {
				return err
			}
		}
	}

	opts.Spec.reload() // re-analyze

	return nil
}
+
// removeUnused iterates single removal passes to a fixed point: removing one
// definition may render others unused.
func removeUnused(opts *FlattenOpts) {
	for removeUnusedSinglePass(opts) {
		// continue until no unused definition remains
	}
}
+
// removeUnusedSinglePass removes definitions that no $ref in the spec points
// to, reporting whether anything was removed.
func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) {
	// start from the full set of definition pointers...
	expected := make(map[string]struct{})
	for k := range opts.Swagger().Definitions {
		expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{}
	}

	// ...and subtract every referenced one; what remains is unused
	for _, k := range opts.Spec.AllDefinitionReferences() {
		delete(expected, k)
	}

	for k := range expected {
		hasRemoved = true
		debugLog("removing unused definition %s", path.Base(k))
		if opts.Verbose {
			log.Printf("info: removing unused definition: %s", path.Base(k))
		}
		// path.Base is safe here: jsonpointer.Escape replaced "/" with "~1"
		delete(opts.Swagger().Definitions, path.Base(k))
	}

	opts.Spec.reload() // re-analyze

	return hasRemoved
}
+
// importKnownRef rewrites every key of entry to a local $ref pointing at the
// definition name previously chosen for refStr.
func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error {
	// rewrite ref with already resolved external ref (useful for cyclical refs):
	// rewrite external refs to local ones
	debugLog("resolving known ref [%s] to %s", refStr, newName)

	for _, key := range entry.Keys {
		if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
			return err
		}
	}

	return nil
}
+
// importNewRef resolves a not-yet-seen remote $ref, rebases its own nested
// $ref's, gives it a unique local name, rewrites all callers to the local
// definition, and records the new definition in the flatten context.
func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error {
	var (
		isOAIGen bool
		newName  string
	)

	debugLog("resolving schema from remote $ref [%s]", refStr)

	sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false))
	if err != nil {
		return fmt.Errorf("could not resolve schema: %w", err)
	}

	// at this stage only $ref analysis matters
	partialAnalyzer := &Spec{
		references: referenceAnalysis{},
		patterns:   patternAnalysis{},
		enums:      enumAnalysis{},
	}
	partialAnalyzer.reset()
	partialAnalyzer.analyzeSchema("", sch, "/")

	// now rewrite those refs with rebase
	for key, ref := range partialAnalyzer.references.allRefs {
		if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil {
			return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err)
		}
	}

	// generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name
	newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts))
	debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen)

	// remember the mapping so later passes reuse this definition
	opts.flattenContext.resolved[refStr] = newName

	// rewrite the external refs to local ones
	for _, key := range entry.Keys {
		if err := replace.UpdateRef(opts.Swagger(), key,
			spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
			return err
		}

		// keep track of created refs
		resolved := false
		if _, ok := opts.flattenContext.newRefs[key]; ok {
			// preserve the resolved flag of an earlier entry for this key
			resolved = opts.flattenContext.newRefs[key].resolved
		}

		debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved)
		opts.flattenContext.newRefs[key] = &newRef{
			key:      key,
			newName:  newName,
			path:     path.Join(definitionsPath, newName),
			isOAIGen: isOAIGen,
			resolved: resolved,
			schema:   sch,
		}
	}

	// add the resolved schema to the definitions
	schutils.Save(opts.Swagger(), newName, sch)

	return nil
}
+
+// importExternalReferences iteratively digs remote references and imports them into the main schema.
+//
+// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported.
+//
+// This returns true when no more remote references can be found.
// importExternalReferences iteratively digs remote references and imports them into the main schema.
//
// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported.
//
// This returns true when no more remote references can be found.
func importExternalReferences(opts *FlattenOpts) (bool, error) {
	debugLog("importExternalReferences")

	groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath)
	sortedRefStr := make([]string, 0, len(groupedRefs))
	if opts.flattenContext == nil {
		// may be called outside of Flatten(): make sure we have a context
		opts.flattenContext = newContext()
	}

	// sort $ref resolution to ensure deterministic name conflict resolution
	for refStr := range groupedRefs {
		sortedRefStr = append(sortedRefStr, refStr)
	}
	sort.Strings(sortedRefStr)

	complete := true

	for _, refStr := range sortedRefStr {
		entry := groupedRefs[refStr]
		// fragment-only refs are already local: nothing to import
		if entry.Ref.HasFragmentOnly {
			continue
		}

		// found at least one remote ref: another pass will be needed
		complete = false

		newName := opts.flattenContext.resolved[refStr]
		if newName != "" {
			// already imported in a previous pass: just rewrite the callers
			if err := importKnownRef(entry, refStr, newName, opts); err != nil {
				return false, err
			}

			continue
		}

		// resolve schemas
		if err := importNewRef(entry, refStr, opts); err != nil {
			return false, err
		}
	}

	// maintains ref index entries
	for k := range opts.flattenContext.newRefs {
		r := opts.flattenContext.newRefs[k]

		// update tracking with resolved schemas
		if r.schema.Ref.String() != "" {
			ref := spec.MustCreateRef(r.path)
			sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false))
			if err != nil {
				return false, fmt.Errorf("could not resolve schema: %w", err)
			}

			r.schema = sch
		}

		if r.path == k {
			continue
		}

		// update tracking with renamed keys: got a cascade of refs
		renamed := *r
		renamed.key = r.path
		opts.flattenContext.newRefs[renamed.path] = &renamed

		// indirect ref
		r.newName = path.Base(k)
		r.schema = spec.RefSchema(r.path)
		r.path = k
		r.isOAIGen = strings.Contains(k, "OAIGen")
	}

	return complete, nil
}
+
+// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler.
+// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler.
// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
func stripPointersAndOAIGen(opts *FlattenOpts) error {
	// name all JSON pointers to anonymous documents
	if err := namePointers(opts); err != nil {
		return err
	}

	// remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts)
	hasIntroducedPointerOrInline, ers := stripOAIGen(opts)
	if ers != nil {
		return ers
	}

	// iterate as pointer or OAIGen resolution may introduce inline schemas or pointers
	for hasIntroducedPointerOrInline {
		if !opts.Minimal {
			// full flattening: newly inlined schemas must be named again
			opts.Spec.reload() // re-analyze
			if err := nameInlinedSchemas(opts); err != nil {
				return err
			}
		}

		if err := namePointers(opts); err != nil {
			return err
		}

		// restrip and re-analyze
		var err error
		if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil {
			return err
		}
	}

	return nil
}
+
+// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions.
+//
+// A dedupe is deemed unnecessary whenever:
+// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
+// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to
+// the first parent.
+//
// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
+// pointer and name resolution again.
func stripOAIGen(opts *FlattenOpts) (bool, error) {
	debugLog("stripOAIGen")
	replacedWithComplex := false

	// figure out referers of OAIGen definitions (doing it before the ref start mutating)
	for _, r := range opts.flattenContext.newRefs {
		updateRefParents(opts.Spec.references.allRefs, r)
	}

	for k := range opts.flattenContext.newRefs {
		r := opts.flattenContext.newRefs[k]
		debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s",
			k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())

		// only OAIGen entries that are actually referenced need stripping
		if !r.isOAIGen || len(r.parents) == 0 {
			continue
		}

		hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r)
		if err != nil {
			return replacedWithComplex, err
		}

		replacedWithComplex = replacedWithComplex || hasReplacedWithComplex
	}

	debugLog("replacedWithComplex: %t", replacedWithComplex)
	opts.Spec.reload() // re-analyze

	return replacedWithComplex, nil
}
+
+// updateRefParents updates all parents of an updated $ref
// updateRefParents updates all parents of an updated $ref
func updateRefParents(allRefs map[string]spec.Ref, r *newRef) {
	if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
		return
	}
	for k, v := range allRefs {
		// collect every $ref key that points at r's definition
		if r.path != v.String() {
			continue
		}

		// append only if not already recorded
		found := false
		for _, p := range r.parents {
			if p == k {
				found = true

				break
			}
		}
		if !found {
			r.parents = append(r.parents, k)
		}
	}
}
+
// stripOAIGenForRef re-inlines one OAIGen definition: its schema is merged
// into the topmost parent, other parents are rewritten to point there, and
// the OAIGen definition is removed. Returns true when a complex schema or an
// anonymous pointer was (re)introduced, so the caller should iterate.
func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) {
	replacedWithComplex := false

	// precondition (checked by caller stripOAIGen): r.parents is non-empty
	pr := sortref.TopmostFirst(r.parents)

	// rewrite first parent schema in hierarchical then lexicographical order
	debugLog("rewrite first parent %s with schema", pr[0])
	if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
		return false, err
	}

	if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
		// update parent in ref index entry
		debugLog("update parent entry: %s", pr[0])
		pa.schema = r.schema
		pa.resolved = false
		replacedWithComplex = true
	}

	// rewrite other parents to point to first parent
	if len(pr) > 1 {
		for _, p := range pr[1:] {
			replacingRef := spec.MustCreateRef(pr[0])

			// set complex when replacing ref is an anonymous jsonpointer: further processing may be required
			replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath
			debugLog("rewrite parent with ref: %s", replacingRef.String())

			// NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
			// Those are stripped later on.
			if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil {
				return false, err
			}

			if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
				// update parent in ref index
				debugLog("update parent entry: %s", p)
				pa.schema = r.schema
				pa.resolved = false
				replacedWithComplex = true
			}
		}
	}

	// remove OAIGen definition
	debugLog("removing definition %s", path.Base(r.path))
	delete(opts.Swagger().Definitions, path.Base(r.path))

	// propagate changes in ref index for keys which have this one as a parent
	for kk, value := range opts.flattenContext.newRefs {
		if kk == k || !value.isOAIGen || value.resolved {
			continue
		}

		found := false
		newParents := make([]string, 0, len(value.parents))
		for _, parent := range value.parents {
			switch {
			case parent == r.path:
				// exact match: reparent to the surviving first parent
				found = true
				parent = pr[0]
			case strings.HasPrefix(parent, r.path+"/"):
				// child pointer under the removed definition: rebase it
				found = true
				parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path))
			}

			newParents = append(newParents, parent)
		}

		if found {
			value.parents = newParents
		}
	}

	// mark naming conflict as resolved
	debugLog("marking naming conflict resolved for key: %s", r.key)
	opts.flattenContext.newRefs[r.key].isOAIGen = false
	opts.flattenContext.newRefs[r.key].resolved = true

	// determine if the previous substitution did inline a complex schema
	if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
		asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
		if err != nil {
			return false, err
		}

		debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex())
		replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex()
	}

	return replacedWithComplex, nil
}
+
+// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions.
+//
+// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself.
+// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used).
func namePointers(opts *FlattenOpts) error {
	debugLog("name pointers")

	// collect every $ref that is not already a top-level definition pointer
	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
	for k, ref := range opts.Spec.references.allRefs {
		debugLog("name pointers: %q => %#v", k, ref)
		if path.Dir(ref.String()) == definitionsPath {
			// this a ref to a top-level definition: ok
			continue
		}

		result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref)
		if err != nil {
			return fmt.Errorf("at %s, %w", k, err)
		}

		replacingRef := result.Ref
		sch := result.Schema
		if opts.flattenContext != nil {
			opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
		}

		debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String())
		refsToReplace[k] = SchemaRef{
			Name:     k,            // caller
			Ref:      replacingRef, // called
			Schema:   sch,
			TopLevel: path.Dir(replacingRef.String()) == definitionsPath,
		}
	}

	// replace deepest pointers first so parents see up-to-date children
	depthFirst := sortref.DepthFirst(refsToReplace)
	namer := &InlineSchemaNamer{
		Spec:           opts.Swagger(),
		Operations:     operations.AllOpRefsByRef(opts.Spec, nil),
		flattenContext: opts.flattenContext,
		opts:           opts,
	}

	for _, key := range depthFirst {
		v := refsToReplace[key]
		// update current replacement, which may have been updated by previous changes of deeper elements
		result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref)
		if erd != nil {
			return fmt.Errorf("at %s, %w", key, erd)
		}

		if opts.flattenContext != nil {
			opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...)
		}

		v.Ref = result.Ref
		v.Schema = result.Schema
		v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath
		debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String())

		if v.TopLevel {
			debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String())

			// if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref
			if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil {
				return err
			}

			continue
		}

		if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil {
			return err
		}
	}

	opts.Spec.reload() // re-analyze

	return nil
}
+
func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error {
	// this is a JSON pointer to an anonymous document (internal or external):
	// create a definition for this schema when:
	// - it is a complex schema
	// - or it is pointed by more than one $ref (i.e. expresses commonality)
	// otherwise, expand the pointer (single reference to a simple type)
	//
	// The named definition for this follows the target's key, not the caller's
	debugLog("namePointers at %s for %s", key, v.Ref.String())

	// qualify the expanded schema
	asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
	if ers != nil {
		return fmt.Errorf("schema analysis [%s]: %w", key, ers)
	}
	callers := make([]string, 0, 64)

	debugLog("looking for callers")

	// re-analyze from scratch: refs may have mutated since the last reload
	an := New(opts.Swagger())
	for k, w := range an.references.allRefs {
		r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w)
		if err != nil {
			return fmt.Errorf("at %s, %w", key, err)
		}

		if opts.flattenContext != nil {
			opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...)
		}

		// a caller is any $ref that ultimately resolves to the same target
		if r.Ref.String() == v.Ref.String() {
			callers = append(callers, k)
		}
	}

	debugLog("callers for %s: %d", v.Ref.String(), len(callers))
	if len(callers) == 0 {
		// has already been updated and resolved
		return nil
	}

	parts := sortref.KeyParts(v.Ref.String())
	debugLog("number of callers for %s: %d", v.Ref.String(), len(callers))

	// identifying edge case when the namer did nothing because we point to a non-schema object
	// no definition is created and we expand the $ref for all callers
	debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
		asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
	)

	if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
		debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
		if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
			return err
		}

		// regular case: we named the $ref as a definition, and we move all callers to this new $ref
		for _, caller := range callers {
			if caller == key {
				continue
			}

			// move $ref for next to resolve
			debugLog("identified caller of %s at [%s]", v.Ref.String(), caller)
			c := refsToReplace[caller]
			c.Ref = v.Ref
			refsToReplace[caller] = c
		}

		return nil
	}

	// everything that is a simple schema and not factorizable is expanded
	debugLog("expand JSON pointer for key=%s", key)

	if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {
		return err
	}
	// NOTE: there is no other caller to update

	return nil
}
diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go
new file mode 100644
index 00000000..c7d7938e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten_name.go
@@ -0,0 +1,308 @@
+package analysis
+
+import (
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/analysis/internal/flatten/operations"
+ "github.com/go-openapi/analysis/internal/flatten/replace"
+ "github.com/go-openapi/analysis/internal/flatten/schutils"
+ "github.com/go-openapi/analysis/internal/flatten/sortref"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+// InlineSchemaNamer finds a new name for an inlined type
// InlineSchemaNamer finds a new name for an inlined type
type InlineSchemaNamer struct {
	Spec           *spec.Swagger             // the spec being rewritten in place
	Operations     map[string]operations.OpRef // operations indexed by $ref, used to derive names
	flattenContext *context                  // tracking of refs created during flattening
	opts           *FlattenOpts              // flattening options (naming, expansion)
}
+
+// Name yields a new name for the inline schema
// Name yields a new name for the inline schema
func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error {
	debugLog("naming inlined schema at %s", key)

	// try every candidate name derived from the pointer's parts
	parts := sortref.KeyParts(key)
	for _, name := range namesFromKey(parts, aschema, isn.Operations) {
		if name == "" {
			continue
		}

		// create unique name
		mangle := mangler(isn.opts)
		newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))

		// clone schema
		sch := schutils.Clone(schema)

		// replace values on schema
		debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
		if err := replace.RewriteSchemaToRef(isn.Spec, key,
			spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
			return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
		}

		// rewrite any dependent $ref pointing to this place,
		// when not already pointing to a top-level definition.
		//
		// NOTE: this is important if such referers use arbitrary JSON pointers.
		an := New(isn.Spec)
		for k, v := range an.references.allRefs {
			r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v)
			if erd != nil {
				return fmt.Errorf("at %s, %w", k, erd)
			}

			if isn.opts.flattenContext != nil {
				isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...)
			}

			// skip refs unrelated to the schema just moved
			if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) {
				continue
			}

			debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())

			// rewrite $ref to the new target
			if err := replace.UpdateRef(isn.Spec, k,
				spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
				return err
			}
		}

		// NOTE: this extension is currently not used by go-swagger (provided for information only)
		sch.AddExtension("x-go-gen-location", GenLocation(parts))

		// save cloned schema to definitions
		schutils.Save(isn.Spec, newName, sch)

		// keep track of created refs
		if isn.flattenContext == nil {
			continue
		}

		debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
		resolved := false

		// preserve the resolved flag of an earlier entry for this key
		if _, ok := isn.flattenContext.newRefs[key]; ok {
			resolved = isn.flattenContext.newRefs[key].resolved
		}

		isn.flattenContext.newRefs[key] = &newRef{
			key:      key,
			newName:  newName,
			path:     path.Join(definitionsPath, newName),
			isOAIGen: isOAIGen,
			resolved: resolved,
			schema:   sch,
		}
	}

	return nil
}
+
+// uniqifyName yields a unique name for a definition
// uniqifyName yields a unique name for a definition.
// The boolean result reports whether the name had to be generated or mangled
// ("OAIGen") to avoid a conflict with an existing definition.
func uniqifyName(definitions spec.Definitions, name string) (string, bool) {
	isOAIGen := false
	if name == "" {
		// no candidate name at all: fully generated
		name = "oaiGen"
		isOAIGen = true
	}

	if len(definitions) == 0 {
		return name, isOAIGen
	}

	// conflict detection is case-insensitive
	unq := true
	for k := range definitions {
		if strings.EqualFold(k, name) {
			unq = false

			break
		}
	}

	if unq {
		return name, isOAIGen
	}

	// conflict: suffix with "OAIGen", then a numeric index until unique
	name += "OAIGen"
	isOAIGen = true
	var idx int
	unique := name
	_, known := definitions[unique]

	for known {
		idx++
		unique = fmt.Sprintf("%s%d", name, idx)
		_, known = definitions[unique]
	}

	return unique, isOAIGen
}
+
// namesFromKey derives candidate definition names from a pointer key,
// sorted for deterministic naming.
func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string {
	var (
		baseNames  [][]string
		startIndex int
	)

	switch {
	case parts.IsOperation():
		baseNames, startIndex = namesForOperation(parts, operations)
	case parts.IsDefinition():
		baseNames, startIndex = namesForDefinition(parts)
	default:
		// this is a non-standard pointer: build a name by concatenating its parts
		baseNames = [][]string{parts}
		startIndex = len(baseNames) + 1
	}

	result := make([]string, 0, len(baseNames))
	for _, segments := range baseNames {
		nm := parts.BuildName(segments, startIndex, partAdder(aschema))
		if nm == "" {
			continue
		}

		result = append(result, nm)
	}
	sort.Strings(result)

	debugLog("names from parts: %v => %v", parts, result)
	return result
}
+
// namesForParam derives candidate names for a schema found in an operation
// parameter (or a shared path-level parameter), with the index of the first
// key part to append to the base name.
func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
	var (
		baseNames  [][]string
		startIndex int
	)

	piref := parts.PathItemRef()
	if piref.String() != "" && parts.IsOperationParam() {
		// parameter attached to a single operation
		if op, ok := operations[piref.String()]; ok {
			startIndex = 5
			baseNames = append(baseNames, []string{op.ID, "params", "body"})
		}
	} else if parts.IsSharedOperationParam() {
		// path-level parameter shared by all operations under that path
		pref := parts.PathRef()
		for k, v := range operations {
			if strings.HasPrefix(k, pref.String()) {
				startIndex = 4
				baseNames = append(baseNames, []string{v.ID, "params", "body"})
			}
		}
	}

	return baseNames, startIndex
}
+
// namesForOperation derives candidate names for a schema found under an
// operation (parameter or response bodies).
func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) {
	var (
		baseNames  [][]string
		startIndex int
	)

	// params
	if parts.IsOperationParam() || parts.IsSharedOperationParam() {
		baseNames, startIndex = namesForParam(parts, operations)
	}

	// responses
	if parts.IsOperationResponse() {
		piref := parts.PathItemRef()
		if piref.String() != "" {
			if op, ok := operations[piref.String()]; ok {
				startIndex = 6
				baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"})
			}
		}
	}

	return baseNames, startIndex
}
+
// namesForDefinition derives the candidate name for a schema nested under a
// top-level definition; empty when the key yields no definition name.
func namesForDefinition(parts sortref.SplitKey) ([][]string, int) {
	nm := parts.DefinitionName()
	if nm != "" {
		return [][]string{{parts.DefinitionName()}}, 2
	}

	return [][]string{}, 0
}
+
+// partAdder knows how to interpret a schema when it comes to build a name from parts
+func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
+ return func(part string) []string {
+ segments := make([]string, 0, 2)
+
+ if part == "items" || part == "additionalItems" {
+ if aschema.IsTuple || aschema.IsTupleWithExtra {
+ segments = append(segments, "tuple")
+ } else {
+ segments = append(segments, "items")
+ }
+
+ if part == "additionalItems" {
+ segments = append(segments, part)
+ }
+
+ return segments
+ }
+
+ segments = append(segments, part)
+
+ return segments
+ }
+}
+
// mangler returns the name-mangling function to apply to generated names:
// identity when KeepNames is set, otherwise swag's JSON-name conversion.
func mangler(o *FlattenOpts) func(string) string {
	if o.KeepNames {
		return func(in string) string { return in }
	}

	return swag.ToJSONName
}
+
// nameFromRef derives a candidate definition name from a $ref, preferring
// the fragment's last segment, then the file base name (extension stripped),
// then the host with dots replaced.
// NOTE(review): assumes ref.GetURL() is non-nil — confirm callers guarantee it.
func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
	mangle := mangler(o)

	u := ref.GetURL()
	if u.Fragment != "" {
		return mangle(path.Base(u.Fragment))
	}

	if u.Path != "" {
		bn := path.Base(u.Path)
		if bn != "" && bn != "/" {
			// strip the file extension, if any
			ext := path.Ext(bn)
			if ext != "" {
				return mangle(bn[:len(bn)-len(ext)])
			}

			return mangle(bn)
		}
	}

	// last resort: derive a name from the host part of the URL
	return mangle(strings.ReplaceAll(u.Host, ".", " "))
}
+
+// GenLocation indicates from which section of the specification (models or operations) a definition has been created.
+//
+// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
+// for information only.
func GenLocation(parts sortref.SplitKey) string {
	switch {
	case parts.IsOperation():
		return "operations"
	case parts.IsDefinition():
		return "models"
	default:
		// neither an operation nor a definition pointer: no location
		return ""
	}
}
diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go
new file mode 100644
index 00000000..c943fe1e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/flatten_options.go
@@ -0,0 +1,79 @@
+package analysis
+
+import (
+ "log"
+
+ "github.com/go-openapi/spec"
+)
+
+// FlattenOpts configuration for flattening a swagger specification.
+//
+// The BasePath parameter is used to locate remote relative $ref found in the specification.
+// This path is a file: it points to the location of the root document and may be either a local
+// file path or a URL.
+//
+// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...")
+// found in the spec are searched from the current working directory.
+type FlattenOpts struct {
+ Spec *Spec // The analyzed spec to work with
+ flattenContext *context // Internal context to track flattening activity
+
+ BasePath string // The location of the root document for this spec to resolve relative $ref
+
+ // Flattening options
+ Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false)
+ Minimal bool // When true, do not decompose complex structures such as allOf
+ Verbose bool // enable some reporting on possible name conflicts detected
+ RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening
+ ContinueOnError bool // Continue when spec expansion issues are found
+ KeepNames bool // Do not attempt to jsonify names from references when flattening
+
+ /* Extra keys */
+ _ struct{} // require keys
+}
+
+// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document.
+func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions {
+ return &spec.ExpandOptions{
+ RelativeBase: f.BasePath,
+ SkipSchemas: skipSchemas,
+ ContinueOnError: f.ContinueOnError,
+ }
+}
+
+// Swagger gets the swagger specification for this flatten operation
+func (f *FlattenOpts) Swagger() *spec.Swagger {
+ return f.Spec.spec
+}
+
+// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting
+// from flattening a spec
+func (f *FlattenOpts) croak() {
+ if !f.Verbose {
+ return
+ }
+
+ reported := make(map[string]bool, len(f.flattenContext.newRefs))
+ for _, v := range f.Spec.references.allRefs {
+ // warns about duplicate handling
+ for _, r := range f.flattenContext.newRefs {
+ if r.isOAIGen && r.path == v.String() {
+ reported[r.newName] = true
+ }
+ }
+ }
+
+ for k := range reported {
+ log.Printf("warning: duplicate flattened definition name resolved as %s", k)
+ }
+
+ // warns about possible type mismatches
+ uniqueMsg := make(map[string]bool)
+ for _, msg := range f.flattenContext.warnings {
+ if _, ok := uniqueMsg[msg]; ok {
+ continue
+ }
+ log.Printf("warning: %s", msg)
+ uniqueMsg[msg] = true
+ }
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
new file mode 100644
index 00000000..39f55a97
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go
@@ -0,0 +1,41 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debug
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+var (
+ output = os.Stdout
+)
+
+// GetLogger provides a prefix debug logger
+func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
+ if debug {
+ logger := log.New(output, prefix+":", log.LstdFlags)
+
+ return func(msg string, args ...interface{}) {
+ _, file1, pos1, _ := runtime.Caller(1)
+ logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+ }
+ }
+
+ return func(_ string, _ ...interface{}) {}
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
new file mode 100644
index 00000000..8c9df058
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
@@ -0,0 +1,87 @@
+package normalize
+
+import (
+ "net/url"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-openapi/spec"
+)
+
+// RebaseRef rebases a remote ref relative to a base ref.
+//
+// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here).
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func RebaseRef(baseRef string, ref string) string {
+ baseRef, _ = url.PathUnescape(baseRef)
+ ref, _ = url.PathUnescape(ref)
+
+ if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") {
+ return ref
+ }
+
+ parts := strings.Split(ref, "#")
+
+ baseParts := strings.Split(baseRef, "#")
+ baseURL, _ := url.Parse(baseParts[0])
+ if strings.HasPrefix(ref, "#") {
+ if baseURL.Host == "" {
+ return strings.Join([]string{baseParts[0], parts[1]}, "#")
+ }
+
+ return strings.Join([]string{baseParts[0], parts[1]}, "#")
+ }
+
+ refURL, _ := url.Parse(parts[0])
+ if refURL.Host != "" || filepath.IsAbs(parts[0]) {
+ // not rebasing an absolute path
+ return ref
+ }
+
+ // there is a relative path
+ var basePath string
+ if baseURL.Host != "" {
+ // when there is a host, standard URI rules apply (with "/")
+ baseURL.Path = path.Dir(baseURL.Path)
+ baseURL.Path = path.Join(baseURL.Path, "/"+parts[0])
+
+ return baseURL.String()
+ }
+
+ // this is a local relative path
+ // basePart[0] and parts[0] are local filesystem directories/files
+ basePath = filepath.Dir(baseParts[0])
+ relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0])
+ if len(parts) > 1 {
+ return strings.Join([]string{relPath, parts[1]}, "#")
+ }
+
+ return relPath
+}
+
+// Path renders absolute path on remote file refs
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func Path(ref spec.Ref, basePath string) string {
+ uri, _ := url.PathUnescape(ref.String())
+ if ref.HasFragmentOnly || filepath.IsAbs(uri) {
+ return uri
+ }
+
+ refURL, _ := url.Parse(uri)
+ if refURL.Host != "" {
+ return uri
+ }
+
+ parts := strings.Split(uri, "#")
+ // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage
+ parts[0] = filepath.Join(filepath.Dir(basePath), parts[0])
+
+ return strings.Join(parts, "#")
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
new file mode 100644
index 00000000..7f3a2b87
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
@@ -0,0 +1,90 @@
+package operations
+
+import (
+ "path"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+// AllOpRefsByRef returns an index of sortable operations
+func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef {
+ return OpRefsByRef(GatherOperations(specDoc, operationIDs))
+}
+
+// OpRefsByRef indexes a map of sortable operations
+func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef {
+ result := make(map[string]OpRef, len(oprefs))
+ for _, v := range oprefs {
+ result[v.Ref.String()] = v
+ }
+
+ return result
+}
+
+// OpRef is an indexable, sortable operation
+type OpRef struct {
+ Method string
+ Path string
+ Key string
+ ID string
+ Op *spec.Operation
+ Ref spec.Ref
+}
+
+// OpRefs is a sortable collection of operations
+type OpRefs []OpRef
+
+func (o OpRefs) Len() int { return len(o) }
+func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key }
+
+// Provider knows how to collect operations from a spec
+type Provider interface {
+ Operations() map[string]map[string]*spec.Operation
+}
+
+// GatherOperations builds a map of sorted operations from a spec
+func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef {
+ var oprefs OpRefs
+
+ for method, pathItem := range specDoc.Operations() {
+ for pth, operation := range pathItem {
+ vv := *operation
+ oprefs = append(oprefs, OpRef{
+ Key: swag.ToGoName(strings.ToLower(method) + " " + pth),
+ Method: method,
+ Path: pth,
+ ID: vv.ID,
+ Op: &vv,
+ Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)),
+ })
+ }
+ }
+
+ sort.Sort(oprefs)
+
+ operations := make(map[string]OpRef)
+ for _, opr := range oprefs {
+ nm := opr.ID
+ if nm == "" {
+ nm = opr.Key
+ }
+
+ oo, found := operations[nm]
+ if found && oo.Method != opr.Method && oo.Path != opr.Path {
+ nm = opr.Key
+ }
+
+ if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) {
+ opr.ID = nm
+ opr.Op.ID = nm
+ operations[nm] = opr
+ }
+ }
+
+ return operations
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
new file mode 100644
index 00000000..c0f43e72
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
@@ -0,0 +1,458 @@
+package replace
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/go-openapi/analysis/internal/debug"
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+)
+
+const definitionsPath = "#/definitions"
+
+var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "")
+
+// RewriteSchemaToRef replaces a schema with a Ref
+func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
+ debugLog("rewriting schema to ref for %s with %s", key, ref.String())
+ _, value, err := getPointerFromKey(sp, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ return rewriteParentRef(sp, key, ref)
+
+ case spec.Schema:
+ return rewriteParentRef(sp, key, ref)
+
+ case *spec.SchemaOrArray:
+ if refable.Schema != nil {
+ refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ }
+
+ case *spec.SchemaOrBool:
+ if refable.Schema != nil {
+ refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ }
+ case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
+ return rewriteParentRef(sp, key, ref)
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
+ parent, entry, pvalue, err := getParentFromKey(sp, key)
+ if err != nil {
+ return err
+ }
+
+ debugLog("rewriting holder for %T", pvalue)
+ switch container := pvalue.(type) {
+ case spec.Response:
+ if err := rewriteParentRef(sp, "#"+parent, ref); err != nil {
+ return err
+ }
+
+ case *spec.Response:
+ container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case *spec.Responses:
+ statusCode, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", key[1:], err)
+ }
+ resp := container.StatusCodeResponses[statusCode]
+ resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ container.StatusCodeResponses[statusCode] = resp
+
+ case map[string]spec.Response:
+ resp := container[entry]
+ resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ container[entry] = resp
+
+ case spec.Parameter:
+ if err := rewriteParentRef(sp, "#"+parent, ref); err != nil {
+ return err
+ }
+
+ case map[string]spec.Parameter:
+ param := container[entry]
+ param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ container[entry] = param
+
+ case []spec.Parameter:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", key[1:], err)
+ }
+ param := container[idx]
+ param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+ container[idx] = param
+
+ case spec.Definitions:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case map[string]spec.Schema:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case []spec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", key[1:], err)
+ }
+ container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case *spec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", key[1:], err)
+ }
+ container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case spec.SchemaProperties:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case *interface{}:
+ *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+
+ default:
+ return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue)
+ }
+
+ return nil
+}
+
+// getPointerFromKey retrieves the content of the JSON pointer "key"
+func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) {
+ switch sp.(type) {
+ case *spec.Schema:
+ case *spec.Swagger:
+ default:
+ panic("unexpected type used in getPointerFromKey")
+ }
+ if key == "#/" {
+ return "", sp, nil
+ }
+ // unescape chars in key, e.g. "{}" from path params
+ pth, _ := url.PathUnescape(key[1:])
+ ptr, err := jsonpointer.New(pth)
+ if err != nil {
+ return "", nil, err
+ }
+
+ value, _, err := ptr.Get(sp)
+ if err != nil {
+ debugLog("error when getting key: %s with path: %s", key, pth)
+
+ return "", nil, err
+ }
+
+ return pth, value, nil
+}
+
+// getParentFromKey retrieves the container of the JSON pointer "key"
+func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) {
+ switch sp.(type) {
+ case *spec.Schema:
+ case *spec.Swagger:
+ default:
+ panic("unexpected type used in getPointerFromKey")
+ }
+ // unescape chars in key, e.g. "{}" from path params
+ pth, _ := url.PathUnescape(key[1:])
+
+ parent, entry := path.Dir(pth), path.Base(pth)
+ debugLog("getting schema holder at: %s, with entry: %s", parent, entry)
+
+ pptr, err := jsonpointer.New(parent)
+ if err != nil {
+ return "", "", nil, err
+ }
+ pvalue, _, err := pptr.Get(sp)
+ if err != nil {
+ return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err)
+ }
+
+ return parent, entry, pvalue, nil
+}
+
+// UpdateRef replaces a ref by another one
+func UpdateRef(sp interface{}, key string, ref spec.Ref) error {
+ switch sp.(type) {
+ case *spec.Schema:
+ case *spec.Swagger:
+ default:
+ panic("unexpected type used in getPointerFromKey")
+ }
+ debugLog("updating ref for %s with %s", key, ref.String())
+ pth, value, err := getPointerFromKey(sp, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ refable.Ref = ref
+ case *spec.SchemaOrArray:
+ if refable.Schema != nil {
+ refable.Schema.Ref = ref
+ }
+ case *spec.SchemaOrBool:
+ if refable.Schema != nil {
+ refable.Schema.Ref = ref
+ }
+ case spec.Schema:
+ debugLog("rewriting holder for %T", refable)
+ _, entry, pvalue, erp := getParentFromKey(sp, key)
+ if erp != nil {
+ return err
+ }
+ switch container := pvalue.(type) {
+ case spec.Definitions:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case map[string]spec.Schema:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case []spec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case *spec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ case spec.SchemaProperties:
+ container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
+
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+
+ default:
+ return fmt.Errorf("unhandled container type at %s: %T", key, value)
+ }
+
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+// UpdateRefWithSchema replaces a ref with a schema (i.e. re-inline schema)
+func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error {
+ debugLog("updating ref for %s with schema", key)
+ pth, value, err := getPointerFromKey(sp, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ *refable = *sch
+ case spec.Schema:
+ _, entry, pvalue, erp := getParentFromKey(sp, key)
+ if erp != nil {
+ return err
+ }
+ switch container := pvalue.(type) {
+ case spec.Definitions:
+ container[entry] = *sch
+
+ case map[string]spec.Schema:
+ container[entry] = *sch
+
+ case []spec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container[idx] = *sch
+
+ case *spec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %w", pth, err)
+ }
+ container.Schemas[idx] = *sch
+
+ case spec.SchemaProperties:
+ container[entry] = *sch
+
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+
+ default:
+ return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value)
+ }
+ case *spec.SchemaOrArray:
+ *refable.Schema = *sch
+ // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
+ case *spec.SchemaOrBool:
+ *refable.Schema = *sch
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+// DeepestRefResult holds the results from DeepestRef analysis
+type DeepestRefResult struct {
+ Ref spec.Ref
+ Schema *spec.Schema
+ Warnings []string
+}
+
+// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions.
+// - if no definition is found, returns the deepest ref.
+// - pointers to external files are expanded
+//
+// NOTE: all external $ref's are assumed to be already expanded at this stage.
+func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) {
+ if !ref.HasFragmentOnly {
+ // we found an external $ref, which is odd at this stage:
+ // do nothing on external $refs
+ return &DeepestRefResult{Ref: ref}, nil
+ }
+
+ currentRef := ref
+ visited := make(map[string]bool, 64)
+ warnings := make([]string, 0, 2)
+
+DOWNREF:
+ for currentRef.String() != "" {
+ if path.Dir(currentRef.String()) == definitionsPath {
+ // this is a top-level definition: stop here and return this ref
+ return &DeepestRefResult{Ref: currentRef}, nil
+ }
+
+ if _, beenThere := visited[currentRef.String()]; beenThere {
+ return nil,
+ fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
+ }
+
+ visited[currentRef.String()] = true
+ value, _, err := currentRef.GetPointer().Get(sp)
+ if err != nil {
+ return nil, err
+ }
+
+ switch refable := value.(type) {
+ case *spec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case spec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case *spec.SchemaOrArray:
+ if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case *spec.SchemaOrBool:
+ if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case spec.Response:
+ // a pointer points to a schema initially marshalled in responses section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema spec.Schema
+
+ err := asSchema.UnmarshalJSON(asJSON)
+ if err != nil {
+ return nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ case spec.Parameter:
+ // a pointer points to a schema initially marshalled in parameters section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ default:
+ // fallback: attempts to resolve the pointer as a schema
+ if refable == nil {
+ break DOWNREF
+ }
+
+ asJSON, _ := json.Marshal(refable)
+ var asSchema spec.Schema
+ if err := asSchema.UnmarshalJSON(asJSON); err != nil {
+ return nil,
+ fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
+ currentRef.String(), value, err,
+ )
+ }
+ warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+ }
+ }
+
+ // assess what schema we're ending with
+ sch, erv := spec.ResolveRefWithBase(sp, ¤tRef, opts)
+ if erv != nil {
+ return nil, erv
+ }
+
+ if sch == nil {
+ return nil, fmt.Errorf("no schema found at %s", currentRef.String())
+ }
+
+ return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
new file mode 100644
index 00000000..4590236e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
@@ -0,0 +1,29 @@
+// Package schutils provides tools to save or clone a schema
+// when flattening a spec.
+package schutils
+
+import (
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+// Save registers a schema as an entry in spec #/definitions
+func Save(sp *spec.Swagger, name string, schema *spec.Schema) {
+ if schema == nil {
+ return
+ }
+
+ if sp.Definitions == nil {
+ sp.Definitions = make(map[string]spec.Schema, 150)
+ }
+
+ sp.Definitions[name] = *schema
+}
+
+// Clone deep-clones a schema
+func Clone(schema *spec.Schema) *spec.Schema {
+ var sch spec.Schema
+ _ = swag.FromDynamicJSON(schema, &sch)
+
+ return &sch
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
new file mode 100644
index 00000000..ac80fc2e
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
@@ -0,0 +1,201 @@
+package sortref
+
+import (
+ "net/http"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/spec"
+)
+
+const (
+ paths = "paths"
+ responses = "responses"
+ parameters = "parameters"
+ definitions = "definitions"
+)
+
+var (
+ ignoredKeys map[string]struct{}
+ validMethods map[string]struct{}
+)
+
+func init() {
+ ignoredKeys = map[string]struct{}{
+ "schema": {},
+ "properties": {},
+ "not": {},
+ "anyOf": {},
+ "oneOf": {},
+ }
+
+ validMethods = map[string]struct{}{
+ "GET": {},
+ "HEAD": {},
+ "OPTIONS": {},
+ "PATCH": {},
+ "POST": {},
+ "PUT": {},
+ "DELETE": {},
+ }
+}
+
+// Key represent a key item constructed from /-separated segments
+type Key struct {
+ Segments int
+ Key string
+}
+
+// Keys is a sortable collection of Keys
+type Keys []Key
+
+func (k Keys) Len() int { return len(k) }
+func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k Keys) Less(i, j int) bool {
+ return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
+}
+
+// KeyParts constructs a SplitKey with all its /-separated segments decomposed. It is sortable.
+func KeyParts(key string) SplitKey {
+ var res []string
+ for _, part := range strings.Split(key[1:], "/") {
+ if part != "" {
+ res = append(res, jsonpointer.Unescape(part))
+ }
+ }
+
+ return res
+}
+
+// SplitKey holds the parts of a /-separated key, so that their location may be determined.
+type SplitKey []string
+
+// IsDefinition is true when the split key is in the #/definitions section of a spec
+func (s SplitKey) IsDefinition() bool {
+ return len(s) > 1 && s[0] == definitions
+}
+
+// DefinitionName yields the name of the definition
+func (s SplitKey) DefinitionName() string {
+ if !s.IsDefinition() {
+ return ""
+ }
+
+ return s[1]
+}
+
+func (s SplitKey) isKeyName(i int) bool {
+ if i <= 0 {
+ return false
+ }
+
+ count := 0
+ for idx := i - 1; idx > 0; idx-- {
+ if s[idx] != "properties" {
+ break
+ }
+ count++
+ }
+
+ return count%2 != 0
+}
+
+// PartAdder know how to construct the components of a new name
+type PartAdder func(string) []string
+
+// BuildName builds a name from segments
+func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string {
+ for i, part := range s[startIndex:] {
+ if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) {
+ segments = append(segments, adder(part)...)
+ }
+ }
+
+ return strings.Join(segments, " ")
+}
+
+// IsOperation is true when the split key is in the operations section
+func (s SplitKey) IsOperation() bool {
+ return len(s) > 1 && s[0] == paths
+}
+
+// IsSharedOperationParam is true when the split key is in the parameters section of a path
+func (s SplitKey) IsSharedOperationParam() bool {
+ return len(s) > 2 && s[0] == paths && s[2] == parameters
+}
+
+// IsSharedParam is true when the split key is in the #/parameters section of a spec
+func (s SplitKey) IsSharedParam() bool {
+ return len(s) > 1 && s[0] == parameters
+}
+
+// IsOperationParam is true when the split key is in the parameters section of an operation
+func (s SplitKey) IsOperationParam() bool {
+ return len(s) > 3 && s[0] == paths && s[3] == parameters
+}
+
+// IsOperationResponse is true when the split key is in the responses section of an operation
+func (s SplitKey) IsOperationResponse() bool {
+ return len(s) > 3 && s[0] == paths && s[3] == responses
+}
+
+// IsSharedResponse is true when the split key is in the #/responses section of a spec
+func (s SplitKey) IsSharedResponse() bool {
+ return len(s) > 1 && s[0] == responses
+}
+
+// IsDefaultResponse is true when the split key is the default response for an operation
+func (s SplitKey) IsDefaultResponse() bool {
+ return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default"
+}
+
+// IsStatusCodeResponse is true when the split key is an operation response with a status code
+func (s SplitKey) IsStatusCodeResponse() bool {
+ isInt := func() bool {
+ _, err := strconv.Atoi(s[4])
+
+ return err == nil
+ }
+
+ return len(s) > 4 && s[0] == paths && s[3] == responses && isInt()
+}
+
+// ResponseName yields either the status code or "Default" for a response
+func (s SplitKey) ResponseName() string {
+ if s.IsStatusCodeResponse() {
+ code, _ := strconv.Atoi(s[4])
+
+ return http.StatusText(code)
+ }
+
+ if s.IsDefaultResponse() {
+ return "Default"
+ }
+
+ return ""
+}
+
+// PathItemRef constructs a $ref object from a split key of the form /{path}/{method}
+func (s SplitKey) PathItemRef() spec.Ref {
+ if len(s) < 3 {
+ return spec.Ref{}
+ }
+
+ pth, method := s[1], s[2]
+ if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") {
+ return spec.Ref{}
+ }
+
+ return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method)))
+}
+
+// PathRef constructs a $ref object from a split key of the form /paths/{reference}
+func (s SplitKey) PathRef() spec.Ref {
+ if !s.IsOperation() {
+ return spec.Ref{}
+ }
+
+ return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1])))
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
new file mode 100644
index 00000000..73243df8
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
@@ -0,0 +1,141 @@
+package sortref
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/analysis/internal/flatten/normalize"
+ "github.com/go-openapi/spec"
+)
+
+var depthGroupOrder = []string{
+ "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition",
+}
+
+type mapIterator struct {
+ len int
+ mapIter *reflect.MapIter
+}
+
+func (i *mapIterator) Next() bool {
+ return i.mapIter.Next()
+}
+
+func (i *mapIterator) Len() int {
+ return i.len
+}
+
+func (i *mapIterator) Key() string {
+ return i.mapIter.Key().String()
+}
+
+func mustMapIterator(anyMap interface{}) *mapIterator {
+ val := reflect.ValueOf(anyMap)
+
+ return &mapIterator{mapIter: val.MapRange(), len: val.Len()}
+}
+
+// DepthFirst sorts a map of anything. It groups keys by category
+// (shared params, op param, status code response, default response, definitions),
+// sorts each group internally by number of parts in the key and lexical names,
+// then flattens the groups into a single list of keys.
+func DepthFirst(in interface{}) []string {
+ iterator := mustMapIterator(in)
+ sorted := make([]string, 0, iterator.Len())
+ grouped := make(map[string]Keys, iterator.Len())
+
+ for iterator.Next() {
+ k := iterator.Key()
+ split := KeyParts(k)
+ var pk string
+
+ if split.IsSharedOperationParam() {
+ pk = "sharedOpParam"
+ }
+ if split.IsOperationParam() {
+ pk = "opParam"
+ }
+ if split.IsStatusCodeResponse() {
+ pk = "codeResponse"
+ }
+ if split.IsDefaultResponse() {
+ pk = "defaultResponse"
+ }
+ if split.IsDefinition() {
+ pk = "definition"
+ }
+ if split.IsSharedParam() {
+ pk = "sharedParam"
+ }
+ if split.IsSharedResponse() {
+ pk = "sharedResponse"
+ }
+ grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k})
+ }
+
+ for _, pk := range depthGroupOrder {
+ res := grouped[pk]
+ sort.Sort(res)
+
+ for _, v := range res {
+ sorted = append(sorted, v.Key)
+ }
+ }
+
+ return sorted
+}
+
+// topmostRefs is able to sort refs by hierarchical then lexicographic order,
+// yielding refs ordered breadth-first.
+type topmostRefs []string
+
+func (k topmostRefs) Len() int { return len(k) }
+func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k topmostRefs) Less(i, j int) bool {
+ li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/"))
+ if li == lj {
+ return k[i] < k[j]
+ }
+
+ return li < lj
+}
+
+// TopmostFirst sorts references by depth
+func TopmostFirst(refs []string) []string {
+ res := topmostRefs(refs)
+ sort.Sort(res)
+
+ return res
+}
+
+// RefRevIdx is a reverse index for references
+type RefRevIdx struct {
+ Ref spec.Ref
+ Keys []string
+}
+
+// ReverseIndex builds a reverse index for references in schemas
+func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx {
+ collected := make(map[string]RefRevIdx)
+ for key, schRef := range schemas {
+ // normalize paths before sorting,
+ // so we get together keys that are from the same external file
+ normalizedPath := normalize.Path(schRef, basePath)
+
+ entry, ok := collected[normalizedPath]
+ if ok {
+ entry.Keys = append(entry.Keys, key)
+ collected[normalizedPath] = entry
+
+ continue
+ }
+
+ collected[normalizedPath] = RefRevIdx{
+ Ref: schRef,
+ Keys: []string{key},
+ }
+ }
+
+ return collected
+}
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
new file mode 100644
index 00000000..7785a29b
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -0,0 +1,515 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/spec"
+)
+
+// Mixin modifies the primary swagger spec by adding the paths and
+// definitions from the mixin specs. Top level parameters and
+// responses from the mixins are also carried over. Operation id
+// collisions are avoided by appending "Mixin" but only if
+// needed.
+//
+// The following parts of primary are subject to merge, filling empty details
+// - Info
+// - BasePath
+// - Host
+// - ExternalDocs
+//
+// Consider calling FixEmptyResponseDescriptions() on the modified primary
+// if you read them from storage and they are valid to start with.
+//
+// Entries in "paths", "definitions", "parameters" and "responses" are
+// added to the primary in the order of the given mixins. If the entry
+// already exists in primary it is skipped with a warning message.
+//
+// The count of skipped entries (from collisions) is returned so any
+// deviation from the number expected can flag a warning in your build
+// scripts. Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No key normalization takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
+//
+// Merging schemes (http, https), and consumers/producers do not account for
+// collisions.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+	skipped := make([]string, 0, len(mixins))
+	opIDs := getOpIDs(primary)
+	// ensure all collections on primary are non-nil before merging into them
+	initPrimary(primary)
+
+	// mixins are merged in order: primary (and earlier mixins) win on conflict
+	for i, m := range mixins {
+		skipped = append(skipped, mergeSwaggerProps(primary, m)...)
+
+		skipped = append(skipped, mergeConsumes(primary, m)...)
+
+		skipped = append(skipped, mergeProduces(primary, m)...)
+
+		skipped = append(skipped, mergeTags(primary, m)...)
+
+		skipped = append(skipped, mergeSchemes(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+
+		skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
+
+		skipped = append(skipped, mergeDefinitions(primary, m)...)
+
+		// merging paths requires a map of operationIDs to work with
+		skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
+
+		skipped = append(skipped, mergeParameters(primary, m)...)
+
+		skipped = append(skipped, mergeResponses(primary, m)...)
+	}
+
+	return skipped
+}
+
+// getOpIDs extracts all the paths..operationIds from the given
+// spec and returns them as the keys in a map with 'true' values.
+func getOpIDs(s *spec.Swagger) map[string]bool {
+	rv := make(map[string]bool)
+	if s.Paths == nil {
+		// no paths: nothing to collect
+		return rv
+	}
+
+	for _, v := range s.Paths.Paths {
+		piops := pathItemOps(v)
+
+		for _, op := range piops {
+			rv[op.ID] = true
+		}
+	}
+
+	return rv
+}
+
+// pathItemOps collects the non-nil operations defined on a path item.
+// NOTE(review): p.Options is not collected here — confirm against the spec
+// package whether OPTIONS operations should participate in operationId
+// collision handling.
+func pathItemOps(p spec.PathItem) []*spec.Operation {
+	var rv []*spec.Operation
+	rv = appendOp(rv, p.Get)
+	rv = appendOp(rv, p.Put)
+	rv = appendOp(rv, p.Post)
+	rv = appendOp(rv, p.Delete)
+	rv = appendOp(rv, p.Head)
+	rv = appendOp(rv, p.Patch)
+
+	return rv
+}
+
+// appendOp adds op to ops, silently skipping nil operations so callers can
+// pass every path-item verb unconditionally.
+func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
+	if op != nil {
+		ops = append(ops, op)
+	}
+
+	return ops
+}
+
+// mergeSecurityDefinitions copies security definitions from the mixin into
+// the primary spec. Colliding keys are kept from the primary and reported
+// as skipped warnings.
+func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.SecurityDefinitions {
+		if _, exists := primary.SecurityDefinitions[k]; exists {
+			warn := fmt.Sprintf(
+				"SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+
+		primary.SecurityDefinitions[k] = v
+	}
+
+	return
+}
+
+// mergeSecurityRequirements appends the mixin's security requirements to
+// the primary spec. Requirements already present (by deep equality) are
+// skipped with a warning.
+func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for _, v := range m.Security {
+		found := false
+		for _, vv := range primary.Security {
+			if reflect.DeepEqual(v, vv) {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			warn := fmt.Sprintf(
+				"Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Security = append(primary.Security, v)
+	}
+
+	return
+}
+
+// mergeDefinitions copies schema definitions from the mixin into the
+// primary spec, skipping (with a warning) keys already defined there.
+func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Definitions {
+		// assume name collisions represent IDENTICAL type. careful.
+		if _, exists := primary.Definitions[k]; exists {
+			warn := fmt.Sprintf(
+				"definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Definitions[k] = v
+	}
+
+	return
+}
+
+// mergePaths copies paths from the mixin into the primary spec, skipping
+// (with a warning) colliding path keys. Colliding operationIds are renamed
+// by appending "Mixin<mixIndex>".
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
+	if m.Paths != nil {
+		for k, v := range m.Paths.Paths {
+			if _, exists := primary.Paths.Paths[k]; exists {
+				warn := fmt.Sprintf(
+					"paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+				skipped = append(skipped, warn)
+
+				continue
+			}
+
+			// Swagger requires that operationIds be
+			// unique within a spec. If we find a
+			// collision we append "Mixin0" to the
+			// operationId we are adding, where 0 is mixin
+			// index. We assume that operationIds within
+			// all the provided specs are already unique.
+			piops := pathItemOps(v)
+			for _, piop := range piops {
+				if opIDs[piop.ID] {
+					piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
+				}
+				opIDs[piop.ID] = true
+			}
+			primary.Paths.Paths[k] = v
+		}
+	}
+
+	return
+}
+
+// mergeParameters copies top level parameters from the mixin into the
+// primary spec, skipping (with a warning) keys already defined there.
+func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Parameters {
+		// could try to rename on conflict but would
+		// have to fix $refs in the mixin. Complain
+		// for now
+		if _, exists := primary.Parameters[k]; exists {
+			warn := fmt.Sprintf(
+				"top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Parameters[k] = v
+	}
+
+	return
+}
+
+// mergeResponses copies top level responses from the mixin into the
+// primary spec, skipping (with a warning) keys already defined there.
+func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for k, v := range m.Responses {
+		// could try to rename on conflict but would
+		// have to fix $refs in the mixin. Complain
+		// for now
+		if _, exists := primary.Responses[k]; exists {
+			warn := fmt.Sprintf(
+				"top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+		primary.Responses[k] = v
+	}
+
+	return skipped
+}
+
+// mergeConsumes adds the mixin's consumes MIME types to the primary spec.
+// Duplicates are skipped silently: no warning is ever reported.
+func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, mime := range m.Consumes {
+		alreadyThere := false
+		for _, existing := range primary.Consumes {
+			if existing == mime {
+				alreadyThere = true
+
+				break
+			}
+		}
+
+		if !alreadyThere {
+			primary.Consumes = append(primary.Consumes, mime)
+		}
+	}
+
+	// consumes collisions are never reported
+	return []string{}
+}
+
+// mergeProduces adds the mixin's produces MIME types to the primary spec.
+// Duplicates are skipped silently: no warning is ever reported.
+func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, mime := range m.Produces {
+		alreadyThere := false
+		for _, existing := range primary.Produces {
+			if existing == mime {
+				alreadyThere = true
+
+				break
+			}
+		}
+
+		if !alreadyThere {
+			primary.Produces = append(primary.Produces, mime)
+		}
+	}
+
+	// produces collisions are never reported
+	return []string{}
+}
+
+// mergeTags appends the mixin's tags to the primary spec, skipping (with a
+// warning) tags whose name already exists there.
+func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+	for _, v := range m.Tags {
+		found := false
+		for _, vv := range primary.Tags {
+			if v.Name == vv.Name {
+				found = true
+
+				break
+			}
+		}
+
+		if found {
+			warn := fmt.Sprintf(
+				"top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n",
+				v.Name,
+			)
+			skipped = append(skipped, warn)
+
+			continue
+		}
+
+		primary.Tags = append(primary.Tags, v)
+	}
+
+	return
+}
+
+// mergeSchemes adds the mixin's schemes (e.g. http, https) to the primary
+// spec. Duplicates are skipped silently: no warning is ever reported.
+func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string {
+	for _, scheme := range m.Schemes {
+		alreadyThere := false
+		for _, existing := range primary.Schemes {
+			if existing == scheme {
+				alreadyThere = true
+
+				break
+			}
+		}
+
+		if !alreadyThere {
+			primary.Schemes = append(primary.Schemes, scheme)
+		}
+	}
+
+	// scheme collisions are never reported
+	return []string{}
+}
+
+// mergeSwaggerProps merges top-level swagger properties (extensions, host,
+// basePath, info, externalDocs) from the mixin into the primary spec.
+// Only empty details on the primary are filled in; colliding extension
+// keys are reported as skipped.
+func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string {
+	var skipped, skippedInfo, skippedDocs []string
+
+	primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions)
+
+	// merging details in swagger top properties
+	if primary.Host == "" {
+		primary.Host = m.Host
+	}
+
+	if primary.BasePath == "" {
+		primary.BasePath = m.BasePath
+	}
+
+	if primary.Info == nil {
+		primary.Info = m.Info
+	} else if m.Info != nil {
+		skippedInfo = mergeInfo(primary.Info, m.Info)
+		skipped = append(skipped, skippedInfo...)
+	}
+
+	if primary.ExternalDocs == nil {
+		primary.ExternalDocs = m.ExternalDocs
+	} else if m.ExternalDocs != nil {
+		// BUGFIX: was "m != nil", which is always true in this call path and
+		// allowed a nil m.ExternalDocs to reach mergeExternalDocs, where it is
+		// dereferenced. Mirror the m.Info check above instead.
+		skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs)
+		skipped = append(skipped, skippedDocs...)
+	}
+
+	return skipped
+}
+
+// mergeExternalDocs fills empty description/URL on the primary external
+// docs from the mixin's. It never reports skipped entries (return is
+// always nil, kept for symmetry with the other merge helpers).
+//
+//nolint:unparam
+func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string {
+	// guard against a nil mixin value: callers may pass m.ExternalDocs
+	// straight through without checking it
+	if m == nil {
+		return nil
+	}
+
+	if primary.Description == "" {
+		primary.Description = m.Description
+	}
+
+	if primary.URL == "" {
+		primary.URL = m.URL
+	}
+
+	return nil
+}
+
+// mergeInfo fills empty fields of the primary Info (title, description,
+// terms, version, contact, license, extensions) from the mixin's Info.
+// Colliding extension keys are returned as skipped.
+func mergeInfo(primary *spec.Info, m *spec.Info) []string {
+	var sk, skipped []string
+
+	primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions)
+	skipped = append(skipped, sk...)
+
+	if primary.Description == "" {
+		primary.Description = m.Description
+	}
+
+	if primary.Title == "" {
+		// BUGFIX: this branch previously re-assigned primary.Description,
+		// so an empty primary title was never filled from the mixin.
+		primary.Title = m.Title
+	}
+
+	if primary.TermsOfService == "" {
+		primary.TermsOfService = m.TermsOfService
+	}
+
+	if primary.Version == "" {
+		primary.Version = m.Version
+	}
+
+	if primary.Contact == nil {
+		primary.Contact = m.Contact
+	} else if m.Contact != nil {
+		var csk []string
+		primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions)
+		skipped = append(skipped, csk...)
+
+		if primary.Contact.Name == "" {
+			primary.Contact.Name = m.Contact.Name
+		}
+
+		if primary.Contact.URL == "" {
+			primary.Contact.URL = m.Contact.URL
+		}
+
+		if primary.Contact.Email == "" {
+			primary.Contact.Email = m.Contact.Email
+		}
+	}
+
+	if primary.License == nil {
+		primary.License = m.License
+	} else if m.License != nil {
+		var lsk []string
+		primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions)
+		skipped = append(skipped, lsk...)
+
+		if primary.License.Name == "" {
+			primary.License.Name = m.License.Name
+		}
+
+		if primary.License.URL == "" {
+			primary.License.URL = m.License.URL
+		}
+	}
+
+	return skipped
+}
+
+// mergeExtensions merges vendor extension maps, giving precedence to the
+// primary map. Colliding keys keep the primary's value and are returned in
+// skipped.
+//
+// NOTE: when both maps are non-nil, primary is mutated in place and the
+// returned result aliases it.
+func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
+	if primary == nil {
+		result = m
+
+		return
+	}
+
+	if m == nil {
+		result = primary
+
+		return
+	}
+
+	result = primary
+	for k, v := range m {
+		if _, found := primary[k]; found {
+			// keep the primary's value; report the collision
+			skipped = append(skipped, k)
+
+			continue
+		}
+
+		primary[k] = v
+	}
+
+	return
+}
+
+// initPrimary ensures every collection on the primary spec is non-nil so
+// the merge helpers can append to slices and index into maps without
+// further nil checks.
+func initPrimary(primary *spec.Swagger) {
+	if primary.SecurityDefinitions == nil {
+		primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
+	}
+
+	if primary.Security == nil {
+		primary.Security = make([]map[string][]string, 0, 10)
+	}
+
+	if primary.Produces == nil {
+		primary.Produces = make([]string, 0, 10)
+	}
+
+	if primary.Consumes == nil {
+		primary.Consumes = make([]string, 0, 10)
+	}
+
+	if primary.Tags == nil {
+		primary.Tags = make([]spec.Tag, 0, 10)
+	}
+
+	if primary.Schemes == nil {
+		primary.Schemes = make([]string, 0, 10)
+	}
+
+	if primary.Paths == nil {
+		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+	}
+
+	// Paths may be non-nil with a nil inner map
+	if primary.Paths.Paths == nil {
+		primary.Paths.Paths = make(map[string]spec.PathItem)
+	}
+
+	if primary.Definitions == nil {
+		primary.Definitions = make(spec.Definitions)
+	}
+
+	if primary.Parameters == nil {
+		primary.Parameters = make(map[string]spec.Parameter)
+	}
+
+	if primary.Responses == nil {
+		primary.Responses = make(map[string]spec.Response)
+	}
+}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
new file mode 100644
index 00000000..ab190db5
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -0,0 +1,256 @@
+package analysis
+
+import (
+ "errors"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// SchemaOpts configures the schema analyzer
+type SchemaOpts struct {
+	Schema   *spec.Schema // the schema to analyze (required)
+	Root     interface{}  // root document, used to expand $ref
+	BasePath string       // base path for resolving relative references
+	_        struct{}     // disallows unkeyed struct literals from other packages
+}
+
+// Schema analysis, will classify the schema according to known
+// patterns.
+//
+// It returns an error when opts.Schema is nil, or when $ref expansion or
+// the analysis of a sub-schema (items, additionalProperties) fails.
+func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
+	if opts.Schema == nil {
+		return nil, errors.New("no schema to analyze")
+	}
+
+	a := &AnalyzedSchema{
+		schema:   opts.Schema,
+		root:     opts.Root,
+		basePath: opts.BasePath,
+	}
+
+	// structural flags must be recorded before any inference below
+	a.initializeFlags()
+	a.inferKnownType()
+	a.inferEnum()
+	a.inferBaseType()
+
+	if err := a.inferMap(); err != nil {
+		return nil, err
+	}
+	if err := a.inferArray(); err != nil {
+		return nil, err
+	}
+
+	a.inferTuple()
+
+	if err := a.inferFromRef(); err != nil {
+		return nil, err
+	}
+
+	// depends on IsKnownType / IsSimpleArray / IsSimpleMap set above
+	a.inferSimpleSchema()
+
+	return a, nil
+}
+
+// AnalyzedSchema indicates what the schema represents
+type AnalyzedSchema struct {
+	schema   *spec.Schema
+	root     interface{}
+	basePath string
+
+	// raw structural traits, set by initializeFlags
+	hasProps           bool
+	hasAllOf           bool
+	hasItems           bool
+	hasAdditionalProps bool
+	hasAdditionalItems bool
+	hasRef             bool
+
+	// classification results, set by the infer* methods
+	IsKnownType        bool
+	IsSimpleSchema     bool
+	IsArray            bool
+	IsSimpleArray      bool
+	IsMap              bool
+	IsSimpleMap        bool
+	IsExtendedObject   bool
+	IsTuple            bool
+	IsTupleWithExtra   bool
+	IsBaseType         bool
+	IsEnum             bool
+}
+
+// Inherits copies value fields from other onto this schema.
+// It is a no-op when other is nil; the schema/root/basePath fields are
+// deliberately not copied.
+func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) {
+	if other == nil {
+		return
+	}
+	a.hasProps = other.hasProps
+	a.hasAllOf = other.hasAllOf
+	a.hasItems = other.hasItems
+	a.hasAdditionalItems = other.hasAdditionalItems
+	a.hasAdditionalProps = other.hasAdditionalProps
+	a.hasRef = other.hasRef
+
+	a.IsKnownType = other.IsKnownType
+	a.IsSimpleSchema = other.IsSimpleSchema
+	a.IsArray = other.IsArray
+	a.IsSimpleArray = other.IsSimpleArray
+	a.IsMap = other.IsMap
+	a.IsSimpleMap = other.IsSimpleMap
+	a.IsExtendedObject = other.IsExtendedObject
+	a.IsTuple = other.IsTuple
+	a.IsTupleWithExtra = other.IsTupleWithExtra
+	a.IsBaseType = other.IsBaseType
+	a.IsEnum = other.IsEnum
+}
+
+// inferFromRef expands a $ref schema against the root document, analyzes
+// the resolved schema and copies its classification onto this one.
+// It is a no-op when the schema carries no $ref.
+func (a *AnalyzedSchema) inferFromRef() error {
+	if a.hasRef {
+		sch := new(spec.Schema)
+		sch.Ref = a.schema.Ref
+		err := spec.ExpandSchema(sch, a.root, nil)
+		if err != nil {
+			return err
+		}
+		rsch, err := Schema(SchemaOpts{
+			Schema:   sch,
+			Root:     a.root,
+			BasePath: a.basePath,
+		})
+		if err != nil {
+			// NOTE(fredbi): currently the only cause for errors is
+			// unresolved ref. Since spec.ExpandSchema() expands the
+			// schema recursively, there is no chance to get there,
+			// until we add more causes for error in this schema analysis.
+			return err
+		}
+		a.inherits(rsch)
+	}
+
+	return nil
+}
+
+// inferSimpleSchema flags schemas that are a known primitive type, a
+// simple array or a simple map. Must run after the other inferences.
+func (a *AnalyzedSchema) inferSimpleSchema() {
+	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
+}
+
+// inferKnownType flags schemas representing a primitive JSON type
+// (boolean, integer, number, string), a string format registered with
+// strfmt, or a fully unconstrained object.
+func (a *AnalyzedSchema) inferKnownType() {
+	tpe := a.schema.Type
+	format := a.schema.Format
+	a.IsKnownType = tpe.Contains("boolean") ||
+		tpe.Contains("integer") ||
+		tpe.Contains("number") ||
+		tpe.Contains("string") ||
+		(format != "" && strfmt.Default.ContainsName(format)) ||
+		// an object with no constraints at all counts as known
+		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
+}
+
+// inferMap classifies object schemas with additionalProperties:
+// a pure map (no properties/allOf alongside) or an extended object (both).
+// A map is "simple" when its additionalProperties schema is itself simple,
+// or when additionalProperties is just the boolean true.
+func (a *AnalyzedSchema) inferMap() error {
+	if !a.isObjectType() {
+		return nil
+	}
+
+	hasExtra := a.hasProps || a.hasAllOf
+	a.IsMap = a.hasAdditionalProps && !hasExtra
+	a.IsExtendedObject = a.hasAdditionalProps && hasExtra
+
+	if !a.IsMap {
+		return nil
+	}
+
+	// maps
+	if a.schema.AdditionalProperties.Schema != nil {
+		// analyze the value schema to decide whether the map is simple
+		msch, err := Schema(SchemaOpts{
+			Schema:   a.schema.AdditionalProperties.Schema,
+			Root:     a.root,
+			BasePath: a.basePath,
+		})
+		if err != nil {
+			return err
+		}
+		a.IsSimpleMap = msch.IsSimpleSchema
+	} else if a.schema.AdditionalProperties.Allows {
+		// additionalProperties: true — values are unconstrained
+		a.IsSimpleMap = true
+	}
+
+	return nil
+}
+
+// inferArray flags array schemas whose items is a single schema (or
+// absent), as opposed to tuples. An array is "simple" when its items
+// schema is itself simple, or when items is absent (unrestricted array).
+func (a *AnalyzedSchema) inferArray() error {
+	// an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple
+	// (yes, even if the Items array contains only one element).
+	// arrays in JSON schema may be unrestricted (i.e no Items specified).
+	// Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays.
+	//
+	// NOTE: the spec package misses the distinction between:
+	// items: [] and items: {}, so we consider both arrays here.
+	a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil)
+	if a.IsArray && a.hasItems {
+		if a.schema.Items.Schema != nil {
+			// analyze the items schema to decide whether the array is simple
+			itsch, err := Schema(SchemaOpts{
+				Schema:   a.schema.Items.Schema,
+				Root:     a.root,
+				BasePath: a.basePath,
+			})
+			if err != nil {
+				return err
+			}
+
+			a.IsSimpleArray = itsch.IsSimpleSchema
+		}
+	}
+
+	if a.IsArray && !a.hasItems {
+		// unrestricted array: trivially simple
+		a.IsSimpleArray = true
+	}
+
+	return nil
+}
+
+// inferTuple flags schemas whose items is a list of schemas; when
+// additionalItems is also present, it is a tuple with extra items.
+func (a *AnalyzedSchema) inferTuple() {
+	tuple := a.hasItems && a.schema.Items.Schemas != nil
+	a.IsTuple = tuple && !a.hasAdditionalItems
+	a.IsTupleWithExtra = tuple && a.hasAdditionalItems
+}
+
+// inferBaseType flags object schemas that carry a discriminator, i.e. the
+// base type of a polymorphic hierarchy.
+func (a *AnalyzedSchema) inferBaseType() {
+	if a.isObjectType() {
+		a.IsBaseType = a.schema.Discriminator != ""
+	}
+}
+
+// inferEnum flags schemas constrained by a non-empty enum of values.
+func (a *AnalyzedSchema) inferEnum() {
+	a.IsEnum = len(a.schema.Enum) > 0
+}
+
+// initializeFlags records the raw structural traits of the schema
+// (properties, allOf, $ref, items, additionalProperties/Items) used by
+// the infer* methods.
+func (a *AnalyzedSchema) initializeFlags() {
+	a.hasProps = len(a.schema.Properties) > 0
+	a.hasAllOf = len(a.schema.AllOf) > 0
+	a.hasRef = a.schema.Ref.String() != ""
+
+	// items may be a single schema or a list of schemas
+	a.hasItems = a.schema.Items != nil &&
+		(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
+
+	// additionalProperties may be a schema or the boolean true
+	a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
+		(a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
+
+	a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
+		(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
+}
+
+// isObjectType reports whether this schema (without $ref) is an object:
+// either explicitly typed "object" or left untyped.
+func (a *AnalyzedSchema) isObjectType() bool {
+	return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
+}
+
+// isArrayType reports whether this schema (without $ref) is explicitly
+// typed "array".
+func (a *AnalyzedSchema) isArrayType() bool {
+	return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
+}
+
+// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex").
+//
+// Complex means the schema is NOT any of:
+// - a simple type (primitive)
+// - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
+// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
+// generate a definition)
+func (a *AnalyzedSchema) isAnalyzedAsComplex() bool {
+	return !a.IsSimpleSchema && !a.IsArray && !a.IsMap
+}
diff --git a/vendor/github.com/go-openapi/errors/.gitattributes b/vendor/github.com/go-openapi/errors/.gitattributes
new file mode 100644
index 00000000..a0717e4b
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore
new file mode 100644
index 00000000..dd91ed6a
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
new file mode 100644
index 00000000..60798c21
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/.golangci.yml
@@ -0,0 +1,75 @@
+version: "2"
+linters:
+ default: all
+ disable:
+ - cyclop
+ - depguard
+ - errchkjson
+ - errorlint
+ - exhaustruct
+ - forcetypeassert
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - godot
+ - godox
+ - gosmopolitan
+ - inamedparam
+ - intrange # disabled while < go1.22
+ - ireturn
+ - lll
+ - musttag
+ - nestif
+ - nlreturn
+ - noinlineerr
+ - nonamedreturns
+ - paralleltest
+ - recvcheck
+ - testpackage
+ - thelper
+ - tparallel
+ - unparam
+ - varnamelen
+ - whitespace
+ - wrapcheck
+ - wsl
+ - wsl_v5
+ settings:
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocyclo:
+ min-complexity: 45
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ # Maximum issues count per one linter.
+ # Set to 0 to disable.
+ # Default: 50
+ max-issues-per-linter: 0
+ # Maximum count of issues with the same text.
+ # Set to 0 to disable.
+ # Default: 3
+ max-same-issues: 0
diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/lxc/lxd/COPYING b/vendor/github.com/go-openapi/errors/LICENSE
similarity index 100%
rename from vendor/github.com/lxc/lxd/COPYING
rename to vendor/github.com/go-openapi/errors/LICENSE
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md
new file mode 100644
index 00000000..6d57ea55
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/README.md
@@ -0,0 +1,8 @@
+# OpenAPI errors [](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/errors)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/errors)
+[](https://goreportcard.com/report/github.com/go-openapi/errors)
+
+Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit.
diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go
new file mode 100644
index 00000000..d6f507f4
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/api.go
@@ -0,0 +1,192 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "reflect"
+ "strings"
+)
+
+// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code.
+// See asHTTPCode: out-of-range codes fall back to this value.
+var DefaultHTTPCode = http.StatusUnprocessableEntity
+
+// Error represents a error interface all swagger framework errors implement
+type Error interface {
+	error
+	// Code returns the error code carried by the error, typically an HTTP status.
+	Code() int32
+}
+
+// apiError is the default Error implementation, pairing a code with a message.
+type apiError struct {
+	code    int32
+	message string
+}
+
+// Error implements the error interface.
+func (a *apiError) Error() string {
+	return a.message
+}
+
+// Code returns the error code.
+func (a *apiError) Code() int32 {
+	return a.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (a apiError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    a.code,
+		"message": a.message,
+	})
+}
+
+// New creates a new API error with a code and a message
+//
+// When args are supplied, message is interpreted as a fmt format string;
+// otherwise it is used verbatim.
+func New(code int32, message string, args ...interface{}) Error {
+	msg := message
+	if len(args) > 0 {
+		msg = fmt.Sprintf(message, args...)
+	}
+
+	return &apiError{
+		code:    code,
+		message: msg,
+	}
+}
+
+// NotFound creates a new not found error
+//
+// message defaults to "Not found" when empty. When args are supplied,
+// message is interpreted as a fmt format string.
+func NotFound(message string, args ...interface{}) Error {
+	if message == "" {
+		message = "Not found"
+	}
+	// Delegate formatting to New, which only applies Sprintf when args are
+	// present. The previous unconditional Sprintf mangled messages containing
+	// literal '%' characters when called without args ("%!"-style artifacts).
+	return New(http.StatusNotFound, message, args...)
+}
+
+// NotImplemented creates a new not implemented error
+// carrying HTTP status 501 and the given message verbatim.
+func NotImplemented(message string) Error {
+	return New(http.StatusNotImplemented, message)
+}
+
+// MethodNotAllowedError represents an error for when the path matches but the method doesn't
+type MethodNotAllowedError struct {
+	code    int32
+	Allowed []string // the methods that ARE allowed on this path; served in the Allow header
+	message string
+}
+
+// Error implements the error interface.
+func (m *MethodNotAllowedError) Error() string {
+	return m.message
+}
+
+// Code the error code
+func (m *MethodNotAllowedError) Code() int32 {
+	return m.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"code":    m.code,
+		"message": m.message,
+		"allowed": m.Allowed,
+	})
+}
+
+// errorAsJSON renders an Error as a JSON {code, message} document.
+// Marshalling this fixed shape cannot fail, so the error is ignored.
+func errorAsJSON(err Error) []byte {
+	//nolint:errchkjson
+	b, _ := json.Marshal(struct {
+		Code    int32  `json:"code"`
+		Message string `json:"message"`
+	}{err.Code(), err.Error()})
+	return b
+}
+
+// flattenComposite recursively expands nested CompositeErrors into a single
+// flat CompositeError, dropping nil and empty entries along the way.
+func flattenComposite(errs *CompositeError) *CompositeError {
+	var res []error
+	for _, er := range errs.Errors {
+		switch e := er.(type) {
+		case *CompositeError:
+			if e != nil && len(e.Errors) > 0 {
+				// recurse into nested composites
+				flat := flattenComposite(e)
+				if len(flat.Errors) > 0 {
+					res = append(res, flat.Errors...)
+				}
+			}
+		default:
+			if e != nil {
+				res = append(res, e)
+			}
+		}
+	}
+	return CompositeValidationError(res...)
+}
+
+// MethodNotAllowed creates a new method not allowed error
+// for the requested method, advertising the methods that are allowed.
+func MethodNotAllowed(requested string, allow []string) Error {
+	msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
+	return &MethodNotAllowedError{
+		code:    http.StatusMethodNotAllowed,
+		Allowed: allow,
+		message: msg,
+	}
+}
+
+// ServeError implements the http error handler interface
+//
+// It writes a JSON {code, message} body with an HTTP status derived from
+// the error's Code(). Composite errors are flattened and only their first
+// element is served; for HEAD requests no body is written.
+func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
+	rw.Header().Set("Content-Type", "application/json")
+	switch e := err.(type) {
+	case *CompositeError:
+		er := flattenComposite(e)
+		// strips composite errors to first element only
+		if len(er.Errors) > 0 {
+			ServeError(rw, r, er.Errors[0])
+		} else {
+			// guard against empty CompositeError (invalid construct)
+			ServeError(rw, r, nil)
+		}
+	case *MethodNotAllowedError:
+		rw.Header().Add("Allow", strings.Join(e.Allowed, ","))
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case Error:
+		value := reflect.ValueOf(e)
+		// a typed-nil Error would panic on e.Code(): serve a generic 500 instead
+		if value.Kind() == reflect.Ptr && value.IsNil() {
+			rw.WriteHeader(http.StatusInternalServerError)
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+			return
+		}
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case nil:
+		rw.WriteHeader(http.StatusInternalServerError)
+		_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+	default:
+		// plain error: wrap its message in a 500
+		rw.WriteHeader(http.StatusInternalServerError)
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
+		}
+	}
+}
+
+// asHTTPCode converts an error code into a usable HTTP status: codes at or
+// above maximumValidHTTPCode (a const defined elsewhere in this package)
+// fall back to DefaultHTTPCode.
+func asHTTPCode(input int) int {
+	if input >= maximumValidHTTPCode {
+		return DefaultHTTPCode
+	}
+	return input
+}
diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go
new file mode 100644
index 00000000..0545b501
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/auth.go
@@ -0,0 +1,22 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import "net/http"
+
+// Unauthenticated returns an unauthenticated error
+func Unauthenticated(scheme string) Error {
+ return New(http.StatusUnauthorized, "unauthenticated for %s", scheme)
+}
diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go
new file mode 100644
index 00000000..af01190c
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package errors provides an Error interface and several concrete types
+implementing this interface to manage API errors and JSON-schema validation
+errors.
+
+A middleware handler ServeError() is provided to serve the errors types
+it defines.
+
+It is used throughout the various go-openapi toolkit libraries
+(https://github.com/go-openapi).
+*/
+package errors
diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go
new file mode 100644
index 00000000..6ea1151f
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/headers.go
@@ -0,0 +1,103 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+// Validation represents a failure of a precondition
+type Validation struct { //nolint: errname
+ code int32
+ Name string
+ In string
+ Value interface{}
+ message string
+ Values []interface{}
+}
+
+func (e *Validation) Error() string {
+ return e.message
+}
+
+// Code the error code
+func (e *Validation) Code() int32 {
+ return e.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (e Validation) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "code": e.code,
+ "message": e.message,
+ "in": e.In,
+ "name": e.Name,
+ "value": e.Value,
+ "values": e.Values,
+ })
+}
+
+// ValidateName sets the name for a validation or updates it for a nested property
+func (e *Validation) ValidateName(name string) *Validation {
+ if name != "" {
+ if e.Name == "" {
+ e.Name = name
+ e.message = name + e.message
+ } else {
+ e.Name = name + "." + e.Name
+ e.message = name + "." + e.message
+ }
+ }
+ return e
+}
+
+const (
+ contentTypeFail = `unsupported media type %q, only %v are allowed`
+ responseFormatFail = `unsupported media type requested, only %v are available`
+)
+
+// InvalidContentType error for an invalid content type
+func InvalidContentType(value string, allowed []string) *Validation {
+ values := make([]interface{}, 0, len(allowed))
+ for _, v := range allowed {
+ values = append(values, v)
+ }
+ return &Validation{
+ code: http.StatusUnsupportedMediaType,
+ Name: "Content-Type",
+ In: "header",
+ Value: value,
+ Values: values,
+ message: fmt.Sprintf(contentTypeFail, value, allowed),
+ }
+}
+
+// InvalidResponseFormat error for an unacceptable response format request
+func InvalidResponseFormat(value string, allowed []string) *Validation {
+ values := make([]interface{}, 0, len(allowed))
+ for _, v := range allowed {
+ values = append(values, v)
+ }
+ return &Validation{
+ code: http.StatusNotAcceptable,
+ Name: "Accept",
+ In: "header",
+ Value: value,
+ Values: values,
+ message: fmt.Sprintf(responseFormatFail, allowed),
+ }
+}
diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go
new file mode 100644
index 00000000..1b9f3a93
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/middleware.go
@@ -0,0 +1,50 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// APIVerificationFailed is an error that contains all the missing info for a mismatched section
+// between the api registrations and the api spec
+type APIVerificationFailed struct { //nolint: errname
+ Section string `json:"section,omitempty"`
+ MissingSpecification []string `json:"missingSpecification,omitempty"`
+ MissingRegistration []string `json:"missingRegistration,omitempty"`
+}
+
+func (v *APIVerificationFailed) Error() string {
+ buf := bytes.NewBuffer(nil)
+
+ hasRegMissing := len(v.MissingRegistration) > 0
+ hasSpecMissing := len(v.MissingSpecification) > 0
+
+ if hasRegMissing {
+ fmt.Fprintf(buf, "missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)
+ }
+
+ if hasRegMissing && hasSpecMissing {
+ buf.WriteString("\n")
+ }
+
+ if hasSpecMissing {
+ fmt.Fprintf(buf, "missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go
new file mode 100644
index 00000000..34930c08
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/parsing.go
@@ -0,0 +1,79 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+// ParseError represents a parsing error
+type ParseError struct {
+ code int32
+ Name string
+ In string
+ Value string
+ Reason error
+ message string
+}
+
+// NewParseError creates a new parse error
+func NewParseError(name, in, value string, reason error) *ParseError {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
+ } else {
+ msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
+ }
+ return &ParseError{
+ code: http.StatusBadRequest,
+ Name: name,
+ In: in,
+ Value: value,
+ Reason: reason,
+ message: msg,
+ }
+}
+
+func (e *ParseError) Error() string {
+ return e.message
+}
+
+// Code returns the http status code for this error
+func (e *ParseError) Code() int32 {
+ return e.code
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (e ParseError) MarshalJSON() ([]byte, error) {
+ var reason string
+ if e.Reason != nil {
+ reason = e.Reason.Error()
+ }
+ return json.Marshal(map[string]interface{}{
+ "code": e.code,
+ "message": e.message,
+ "in": e.In,
+ "name": e.Name,
+ "value": e.Value,
+ "reason": reason,
+ })
+}
+
+const (
+ parseErrorTemplContent = `parsing %s %s from %q failed, because %s`
+ parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
+)
diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go
new file mode 100644
index 00000000..8f3239df
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/schema.go
@@ -0,0 +1,619 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+const (
+ invalidType = "%s is an invalid type name"
+ typeFail = "%s in %s must be of type %s"
+ typeFailWithData = "%s in %s must be of type %s: %q"
+ typeFailWithError = "%s in %s must be of type %s, because: %s"
+ requiredFail = "%s in %s is required"
+ readOnlyFail = "%s in %s is readOnly"
+ tooLongMessage = "%s in %s should be at most %d chars long"
+ tooShortMessage = "%s in %s should be at least %d chars long"
+ patternFail = "%s in %s should match '%s'"
+ enumFail = "%s in %s should be one of %v"
+ multipleOfFail = "%s in %s should be a multiple of %v"
+ maximumIncFail = "%s in %s should be less than or equal to %v"
+ maximumExcFail = "%s in %s should be less than %v"
+ minIncFail = "%s in %s should be greater than or equal to %v"
+ minExcFail = "%s in %s should be greater than %v"
+ uniqueFail = "%s in %s shouldn't contain duplicates"
+ maximumItemsFail = "%s in %s should have at most %d items"
+ minItemsFail = "%s in %s should have at least %d items"
+ typeFailNoIn = "%s must be of type %s"
+ typeFailWithDataNoIn = "%s must be of type %s: %q"
+ typeFailWithErrorNoIn = "%s must be of type %s, because: %s"
+ requiredFailNoIn = "%s is required"
+ readOnlyFailNoIn = "%s is readOnly"
+ tooLongMessageNoIn = "%s should be at most %d chars long"
+ tooShortMessageNoIn = "%s should be at least %d chars long"
+ patternFailNoIn = "%s should match '%s'"
+ enumFailNoIn = "%s should be one of %v"
+ multipleOfFailNoIn = "%s should be a multiple of %v"
+ maximumIncFailNoIn = "%s should be less than or equal to %v"
+ maximumExcFailNoIn = "%s should be less than %v"
+ minIncFailNoIn = "%s should be greater than or equal to %v"
+ minExcFailNoIn = "%s should be greater than %v"
+ uniqueFailNoIn = "%s shouldn't contain duplicates"
+ maximumItemsFailNoIn = "%s should have at most %d items"
+ minItemsFailNoIn = "%s should have at least %d items"
+ noAdditionalItems = "%s in %s can't have additional items"
+ noAdditionalItemsNoIn = "%s can't have additional items"
+ tooFewProperties = "%s in %s should have at least %d properties"
+ tooFewPropertiesNoIn = "%s should have at least %d properties"
+ tooManyProperties = "%s in %s should have at most %d properties"
+ tooManyPropertiesNoIn = "%s should have at most %d properties"
+ unallowedProperty = "%s.%s in %s is a forbidden property"
+ unallowedPropertyNoIn = "%s.%s is a forbidden property"
+ failedAllPatternProps = "%s.%s in %s failed all pattern properties"
+ failedAllPatternPropsNoIn = "%s.%s failed all pattern properties"
+ multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v"
+)
+
+const maximumValidHTTPCode = 600
+
+// All code responses can be used to differentiate errors for different handling
+// by the consuming program
+const (
+ // CompositeErrorCode remains 422 for backwards-compatibility
+ // and to separate it from validation errors with cause
+ CompositeErrorCode = http.StatusUnprocessableEntity
+
+ // InvalidTypeCode is used for any subclass of invalid types
+ InvalidTypeCode = maximumValidHTTPCode + iota
+ RequiredFailCode
+ TooLongFailCode
+ TooShortFailCode
+ PatternFailCode
+ EnumFailCode
+ MultipleOfFailCode
+ MaxFailCode
+ MinFailCode
+ UniqueFailCode
+ MaxItemsFailCode
+ MinItemsFailCode
+ NoAdditionalItemsCode
+ TooFewPropertiesCode
+ TooManyPropertiesCode
+ UnallowedPropertyCode
+ FailedAllPatternPropsCode
+ MultipleOfMustBePositiveCode
+ ReadOnlyFailCode
+)
+
+// CompositeError is an error that groups several errors together
+type CompositeError struct {
+ Errors []error
+ code int32
+ message string
+}
+
+// Code for this error
+func (c *CompositeError) Code() int32 {
+ return c.code
+}
+
+func (c *CompositeError) Error() string {
+ if len(c.Errors) > 0 {
+ msgs := []string{c.message + ":"}
+ for _, e := range c.Errors {
+ msgs = append(msgs, e.Error())
+ }
+ return strings.Join(msgs, "\n")
+ }
+ return c.message
+}
+
+func (c *CompositeError) Unwrap() []error {
+ return c.Errors
+}
+
+// MarshalJSON implements the JSON encoding interface
+func (c CompositeError) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "code": c.code,
+ "message": c.message,
+ "errors": c.Errors,
+ })
+}
+
+// CompositeValidationError an error to wrap a bunch of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+ return &CompositeError{
+ code: CompositeErrorCode,
+ Errors: append(make([]error, 0, len(errors)), errors...),
+ message: "validation failure list",
+ }
+}
+
+// ValidateName recursively sets the name for all validations or updates them for nested properties
+func (c *CompositeError) ValidateName(name string) *CompositeError {
+ for i, e := range c.Errors {
+ if ve, ok := e.(*Validation); ok {
+ c.Errors[i] = ve.ValidateName(name)
+ } else if ce, ok := e.(*CompositeError); ok {
+ c.Errors[i] = ce.ValidateName(name)
+ }
+ }
+
+ return c
+}
+
+// FailedAllPatternProperties an error for when the property doesn't match a pattern
+func FailedAllPatternProperties(name, in, key string) *Validation {
+ msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+ if in == "" {
+ msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+ }
+ return &Validation{
+ code: FailedAllPatternPropsCode,
+ Name: name,
+ In: in,
+ Value: key,
+ message: msg,
+ }
+}
+
+// PropertyNotAllowed an error for when the property doesn't match a pattern
+func PropertyNotAllowed(name, in, key string) *Validation {
+ msg := fmt.Sprintf(unallowedProperty, name, key, in)
+ if in == "" {
+ msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+ }
+ return &Validation{
+ code: UnallowedPropertyCode,
+ Name: name,
+ In: in,
+ Value: key,
+ message: msg,
+ }
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+ msg := fmt.Sprintf(tooFewProperties, name, in, n)
+ if in == "" {
+ msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+ }
+ return &Validation{
+ code: TooFewPropertiesCode,
+ Name: name,
+ In: in,
+ Value: n,
+ message: msg,
+ }
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+ msg := fmt.Sprintf(tooManyProperties, name, in, n)
+ if in == "" {
+ msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+ }
+ return &Validation{
+ code: TooManyPropertiesCode,
+ Name: name,
+ In: in,
+ Value: n,
+ message: msg,
+ }
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+ msg := fmt.Sprintf(noAdditionalItems, name, in)
+ if in == "" {
+ msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+ }
+ return &Validation{
+ code: NoAdditionalItemsCode,
+ Name: name,
+ In: in,
+ message: msg,
+ }
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+ return &Validation{
+ code: InvalidTypeCode,
+ Name: name,
+ In: in,
+ Value: format,
+ message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+ }
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+ return &Validation{
+ code: InvalidTypeCode,
+ Value: typeName,
+ message: fmt.Sprintf(invalidType, typeName),
+ }
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+ var message string
+
+ if in != "" {
+ switch value.(type) {
+ case string:
+ message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+ case error:
+ message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+ default:
+ message = fmt.Sprintf(typeFail, name, in, typeName)
+ }
+ } else {
+ switch value.(type) {
+ case string:
+ message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+ case error:
+ message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
+ default:
+ message = fmt.Sprintf(typeFailNoIn, name, typeName)
+ }
+ }
+
+ return &Validation{
+ code: InvalidTypeCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+ msg := fmt.Sprintf(uniqueFail, name, in)
+ if in == "" {
+ msg = fmt.Sprintf(uniqueFailNoIn, name)
+ }
+ return &Validation{
+ code: UniqueFailCode,
+ Name: name,
+ In: in,
+ message: msg,
+ }
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, maximum int64, value interface{}) *Validation {
+ msg := fmt.Sprintf(maximumItemsFail, name, in, maximum)
+ if in == "" {
+ msg = fmt.Sprintf(maximumItemsFailNoIn, name, maximum)
+ }
+
+ return &Validation{
+ code: MaxItemsFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, minimum int64, value interface{}) *Validation {
+ msg := fmt.Sprintf(minItemsFail, name, in, minimum)
+ if in == "" {
+ msg = fmt.Sprintf(minItemsFailNoIn, name, minimum)
+ }
+ return &Validation{
+ code: MinItemsFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, maximum int64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := maximumIncFailNoIn
+ if exclusive {
+ m = maximumExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, maximum)
+ } else {
+ m := maximumIncFail
+ if exclusive {
+ m = maximumExcFail
+ }
+ message = fmt.Sprintf(m, name, in, maximum)
+ }
+ return &Validation{
+ code: MaxFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, maximum uint64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := maximumIncFailNoIn
+ if exclusive {
+ m = maximumExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, maximum)
+ } else {
+ m := maximumIncFail
+ if exclusive {
+ m = maximumExcFail
+ }
+ message = fmt.Sprintf(m, name, in, maximum)
+ }
+ return &Validation{
+ code: MaxFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, maximum float64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := maximumIncFailNoIn
+ if exclusive {
+ m = maximumExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, maximum)
+ } else {
+ m := maximumIncFail
+ if exclusive {
+ m = maximumExcFail
+ }
+ message = fmt.Sprintf(m, name, in, maximum)
+ }
+ return &Validation{
+ code: MaxFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// ExceedsMinimumInt error for when minimum validation fails
+func ExceedsMinimumInt(name, in string, minimum int64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := minIncFailNoIn
+ if exclusive {
+ m = minExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, minimum)
+ } else {
+ m := minIncFail
+ if exclusive {
+ m = minExcFail
+ }
+ message = fmt.Sprintf(m, name, in, minimum)
+ }
+ return &Validation{
+ code: MinFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// ExceedsMinimumUint error for when minimum validation fails
+func ExceedsMinimumUint(name, in string, minimum uint64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := minIncFailNoIn
+ if exclusive {
+ m = minExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, minimum)
+ } else {
+ m := minIncFail
+ if exclusive {
+ m = minExcFail
+ }
+ message = fmt.Sprintf(m, name, in, minimum)
+ }
+ return &Validation{
+ code: MinFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// ExceedsMinimum error for when minimum validation fails
+func ExceedsMinimum(name, in string, minimum float64, exclusive bool, value interface{}) *Validation {
+ var message string
+ if in == "" {
+ m := minIncFailNoIn
+ if exclusive {
+ m = minExcFailNoIn
+ }
+ message = fmt.Sprintf(m, name, minimum)
+ } else {
+ m := minIncFail
+ if exclusive {
+ m = minExcFail
+ }
+ message = fmt.Sprintf(m, name, in, minimum)
+ }
+ return &Validation{
+ code: MinFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: message,
+ }
+}
+
+// NotMultipleOf error for when multiple of validation fails
+func NotMultipleOf(name, in string, multiple, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
+ } else {
+ msg = fmt.Sprintf(multipleOfFail, name, in, multiple)
+ }
+ return &Validation{
+ code: MultipleOfFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// EnumFail error for when an enum validation fails
+func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(enumFailNoIn, name, values)
+ } else {
+ msg = fmt.Sprintf(enumFail, name, in, values)
+ }
+
+ return &Validation{
+ code: EnumFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ Values: values,
+ message: msg,
+ }
+}
+
+// Required error for when a value is missing
+func Required(name, in string, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(requiredFailNoIn, name)
+ } else {
+ msg = fmt.Sprintf(requiredFail, name, in)
+ }
+ return &Validation{
+ code: RequiredFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// ReadOnly error for when a value is present in request
+func ReadOnly(name, in string, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(readOnlyFailNoIn, name)
+ } else {
+ msg = fmt.Sprintf(readOnlyFail, name, in)
+ }
+ return &Validation{
+ code: ReadOnlyFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// TooLong error for when a string is too long
+func TooLong(name, in string, maximum int64, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(tooLongMessageNoIn, name, maximum)
+ } else {
+ msg = fmt.Sprintf(tooLongMessage, name, in, maximum)
+ }
+ return &Validation{
+ code: TooLongFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// TooShort error for when a string is too short
+func TooShort(name, in string, minimum int64, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(tooShortMessageNoIn, name, minimum)
+ } else {
+ msg = fmt.Sprintf(tooShortMessage, name, in, minimum)
+ }
+
+ return &Validation{
+ code: TooShortFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// FailedPattern error for when a string fails a regex pattern match
+// the pattern that is returned is the ECMA syntax version of the pattern not the golang version.
+func FailedPattern(name, in, pattern string, value interface{}) *Validation {
+ var msg string
+ if in == "" {
+ msg = fmt.Sprintf(patternFailNoIn, name, pattern)
+ } else {
+ msg = fmt.Sprintf(patternFail, name, in, pattern)
+ }
+
+ return &Validation{
+ code: PatternFailCode,
+ Name: name,
+ In: in,
+ Value: value,
+ message: msg,
+ }
+}
+
+// MultipleOfMustBePositive error for when a
+// multipleOf factor is negative
+func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation {
+ return &Validation{
+ code: MultipleOfMustBePositiveCode,
+ Name: name,
+ In: in,
+ Value: factor,
+ message: fmt.Sprintf(multipleOfMustBePositive, name, factor),
+ }
+}
diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore
new file mode 100644
index 00000000..769c2440
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore
@@ -0,0 +1 @@
+secrets.yml
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 00000000..50063062
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,62 @@
+version: "2"
+linters:
+ default: all
+ disable:
+ - cyclop
+ - depguard
+ - errchkjson
+ - errorlint
+ - exhaustruct
+ - forcetypeassert
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - godot
+ - godox
+ - gosmopolitan
+ - inamedparam
+ - ireturn
+ - lll
+ - musttag
+ - nestif
+ - nlreturn
+ - nonamedreturns
+ - paralleltest
+ - testpackage
+ - thelper
+ - tparallel
+ - unparam
+ - varnamelen
+ - whitespace
+ - wrapcheck
+ - wsl
+ settings:
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocyclo:
+ min-complexity: 45
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
new file mode 100644
index 00000000..0108f1d5
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -0,0 +1,19 @@
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
+
+An implementation of JSON Pointer - Go language
+
+## Status
+Completed YES
+
+Tested YES
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go
new file mode 100644
index 00000000..b84343d9
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/errors.go
@@ -0,0 +1,18 @@
+package jsonpointer
+
+type pointerError string
+
+func (e pointerError) Error() string {
+ return string(e)
+}
+
+const (
+ // ErrPointer is an error raised by the jsonpointer package
+ ErrPointer pointerError = "JSON pointer error"
+
+ // ErrInvalidStart states that a JSON pointer must start with a separator ("/")
+ ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator
+
+ // ErrUnsupportedValueType indicates that a value of the wrong type is being set
+ ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values"
+)
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
new file mode 100644
index 00000000..61362105
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -0,0 +1,540 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author sigu-399
+// author-github https://github.com/sigu-399
+// author-mail sigu.399@gmail.com
+//
+// repository-name jsonpointer
+// repository-desc An implementation of JSON Pointer - Go language
+//
+// description Main and unique file.
+//
+// created 25-02-2013
+
+package jsonpointer
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+const (
+ emptyPointer = ``
+ pointerSeparator = `/`
+)
+
+var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
+var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
+
+// JSONPointable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONPointable interface {
+ JSONLookup(string) (any, error)
+}
+
+// JSONSetable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONSetable interface {
+ JSONSet(string, any) error
+}
+
+// New creates a new json pointer for the given string
+func New(jsonPointerString string) (Pointer, error) {
+
+ var p Pointer
+ err := p.parse(jsonPointerString)
+ return p, err
+
+}
+
+// Pointer the json pointer reprsentation
+type Pointer struct {
+ referenceTokens []string
+}
+
+// "Constructor", parses the given string JSON pointer
+func (p *Pointer) parse(jsonPointerString string) error {
+
+ var err error
+
+ if jsonPointerString != emptyPointer {
+ if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
+ err = errors.Join(ErrInvalidStart, ErrPointer)
+ } else {
+ referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
+ }
+ }
+
+ return err
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
+ return p.get(document, swag.DefaultJSONNameProvider)
+}
+
+// Set uses the pointer to set a value from a JSON document
+func (p *Pointer) Set(document any, value any) (any, error) {
+ return document, p.set(document, value, swag.DefaultJSONNameProvider)
+}
+
+// GetForToken gets a value for a json pointer token 1 level deep
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
+ return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+// SetForToken gets a value for a json pointer token 1 level deep
+func SetForToken(document any, decodedToken string, value any) (any, error) {
+ return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+ kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer)
+ }
+
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
+ if err != nil {
+ return nil, kind, err
+ }
+ return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
+ }
+
+ switch kind { //nolint:exhaustive
+ case reflect.Struct:
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
+ }
+ fld := rValue.FieldByName(nm)
+ return fld.Interface(), kind, nil
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ mv := rValue.MapIndex(kv)
+
+ if mv.IsValid() {
+ return mv.Interface(), kind, nil
+ }
+ return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return nil, kind, err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+ return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ return elem.Interface(), kind, nil
+
+ default:
+ return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
+ }
+
+}
+
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+
+ // Check for nil to prevent panic when calling rValue.Type()
+ if isNil(node) {
+ return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer)
+ }
+
+ if ns, ok := node.(JSONSetable); ok { // pointer impl
+ return ns.JSONSet(decodedToken, data)
+ }
+
+ if rValue.Type().Implements(jsonSetableType) {
+ return node.(JSONSetable).JSONSet(decodedToken, data)
+ }
+
+ switch rValue.Kind() { //nolint:exhaustive
+ case reflect.Struct:
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
+ }
+ fld := rValue.FieldByName(nm)
+ if fld.IsValid() {
+ fld.Set(reflect.ValueOf(data))
+ }
+ return nil
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ rValue.SetMapIndex(kv, reflect.ValueOf(data))
+ return nil
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+ return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ if !elem.CanSet() {
+ return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer)
+ }
+ elem.Set(reflect.ValueOf(data))
+ return nil
+
+ default:
+ return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
+ }
+
+}
+
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
+
+ if nameProvider == nil {
+ nameProvider = swag.DefaultJSONNameProvider
+ }
+
+ kind := reflect.Invalid
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ return node, kind, nil
+ }
+
+ for _, token := range p.referenceTokens {
+ decodedToken := Unescape(token)
+
+ r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
+ if err != nil {
+ return nil, knd, err
+ }
+ node = r
+ }
+
+ rValue := reflect.ValueOf(node)
+ kind = rValue.Kind()
+
+ return node, kind, nil
+}
+
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
+ knd := reflect.ValueOf(node).Kind()
+
+ if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
+ return errors.Join(
+ ErrUnsupportedValueType,
+ ErrPointer,
+ )
+ }
+
+ if nameProvider == nil {
+ nameProvider = swag.DefaultJSONNameProvider
+ }
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ return nil
+ }
+
+ lastI := len(p.referenceTokens) - 1
+ for i, token := range p.referenceTokens {
+ isLastToken := i == lastI
+ decodedToken := Unescape(token)
+
+ if isLastToken {
+
+ return setSingleImpl(node, data, decodedToken, nameProvider)
+ }
+
+ // Check for nil during traversal
+ if isNil(node) {
+ return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer)
+ }
+
+ rValue := reflect.Indirect(reflect.ValueOf(node))
+ kind := rValue.Kind()
+
+ if rValue.Type().Implements(jsonPointableType) {
+ r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ if err != nil {
+ return err
+ }
+ fld := reflect.ValueOf(r)
+ if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+ node = fld.Addr().Interface()
+ continue
+ }
+ node = r
+ continue
+ }
+
+ switch kind { //nolint:exhaustive
+ case reflect.Struct:
+ nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+ if !ok {
+ return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
+ }
+ fld := rValue.FieldByName(nm)
+ if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+ node = fld.Addr().Interface()
+ continue
+ }
+ node = fld.Interface()
+
+ case reflect.Map:
+ kv := reflect.ValueOf(decodedToken)
+ mv := rValue.MapIndex(kv)
+
+ if !mv.IsValid() {
+ return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
+ }
+ if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
+ node = mv.Addr().Interface()
+ continue
+ }
+ node = mv.Interface()
+
+ case reflect.Slice:
+ tokenIndex, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return err
+ }
+ sLength := rValue.Len()
+ if tokenIndex < 0 || tokenIndex >= sLength {
+ return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
+ }
+
+ elem := rValue.Index(tokenIndex)
+ if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr {
+ node = elem.Addr().Interface()
+ continue
+ }
+ node = elem.Interface()
+
+ default:
+ return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
+ }
+
+ }
+
+ return nil
+}
+
+// DecodedTokens returns the decoded tokens
+func (p *Pointer) DecodedTokens() []string {
+ result := make([]string, 0, len(p.referenceTokens))
+ for _, t := range p.referenceTokens {
+ result = append(result, Unescape(t))
+ }
+ return result
+}
+
+// IsEmpty returns true if this is an empty json pointer
+// this indicates that it points to the root document
+func (p *Pointer) IsEmpty() bool {
+ return len(p.referenceTokens) == 0
+}
+
+// Pointer to string representation function
+func (p *Pointer) String() string {
+
+ if len(p.referenceTokens) == 0 {
+ return emptyPointer
+ }
+
+ pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)
+
+ return pointerString
+}
+
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
+
+const (
+ encRefTok0 = `~0`
+ encRefTok1 = `~1`
+ decRefTok0 = `~`
+ decRefTok1 = `/`
+)
+
+// Unescape unescapes a json pointer reference token string to the original representation
+func Unescape(token string) string {
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
+ return step2
+}
+
+// Escape escapes a pointer reference token string
+func Escape(token string) string {
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
+ return step2
+}
diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore
new file mode 100644
index 00000000..769c2440
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.gitignore
@@ -0,0 +1 @@
+secrets.yml
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
new file mode 100644
index 00000000..c7fc2049
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -0,0 +1,19 @@
+# gojsonreference [](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonreference)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonreference)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
+
+An implementation of JSON Reference - Go language
+
+## Status
+Feature complete. Stable API
+
+## Dependencies
+* https://github.com/go-openapi/jsonpointer
+
+## References
+
+* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
new file mode 100644
index 00000000..f0610cf1
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -0,0 +1,69 @@
+package internal
+
+import (
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+const (
+ defaultHTTPPort = ":80"
+ defaultHTTPSPort = ":443"
+)
+
+// Regular expressions used by the normalizations
+var rxPort = regexp.MustCompile(`(:\d+)/?$`)
+var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+
+// NormalizeURL will normalize the specified URL
+// This was added to replace a previous call to the no longer maintained purell library:
+// The call that was used looked like the following:
+//
+// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
+//
+// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
+// - FlagLowercaseScheme
+// - FlagLowercaseHost
+// - FlagRemoveDefaultPort
+// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
+//
+// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
+func NormalizeURL(u *url.URL) {
+ lowercaseScheme(u)
+ lowercaseHost(u)
+ removeDefaultPort(u)
+ removeDuplicateSlashes(u)
+
+ u.RawPath = ""
+ u.RawFragment = ""
+}
+
+func lowercaseScheme(u *url.URL) {
+ if len(u.Scheme) > 0 {
+ u.Scheme = strings.ToLower(u.Scheme)
+ }
+}
+
+func lowercaseHost(u *url.URL) {
+ if len(u.Host) > 0 {
+ u.Host = strings.ToLower(u.Host)
+ }
+}
+
+func removeDefaultPort(u *url.URL) {
+ if len(u.Host) > 0 {
+ scheme := strings.ToLower(u.Scheme)
+ u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
+ if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
+ return ""
+ }
+ return val
+ })
+ }
+}
+
+func removeDuplicateSlashes(u *url.URL) {
+ if len(u.Path) > 0 {
+ u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
+ }
+}
diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go
new file mode 100644
index 00000000..cfdef03e
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/reference.go
@@ -0,0 +1,158 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author sigu-399
+// author-github https://github.com/sigu-399
+// author-mail sigu.399@gmail.com
+//
+// repository-name jsonreference
+// repository-desc An implementation of JSON Reference - Go language
+//
+// description Main and unique file.
+//
+// created 26-02-2013
+
+package jsonreference
+
+import (
+ "errors"
+ "net/url"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/jsonreference/internal"
+)
+
+const (
+ fragmentRune = `#`
+)
+
+// New creates a new reference for the given string
+func New(jsonReferenceString string) (Ref, error) {
+
+ var r Ref
+ err := r.parse(jsonReferenceString)
+ return r, err
+
+}
+
+// MustCreateRef parses the ref string and panics when it's invalid.
+// Use the New method for a version that returns an error
+func MustCreateRef(ref string) Ref {
+ r, err := New(ref)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// Ref represents a json reference object
+type Ref struct {
+ referenceURL *url.URL
+ referencePointer jsonpointer.Pointer
+
+ HasFullURL bool
+ HasURLPathOnly bool
+ HasFragmentOnly bool
+ HasFileScheme bool
+ HasFullFilePath bool
+}
+
+// GetURL gets the URL for this reference
+func (r *Ref) GetURL() *url.URL {
+ return r.referenceURL
+}
+
+// GetPointer gets the json pointer for this reference
+func (r *Ref) GetPointer() *jsonpointer.Pointer {
+ return &r.referencePointer
+}
+
+// String returns the best version of the url for this reference
+func (r *Ref) String() string {
+
+ if r.referenceURL != nil {
+ return r.referenceURL.String()
+ }
+
+ if r.HasFragmentOnly {
+ return fragmentRune + r.referencePointer.String()
+ }
+
+ return r.referencePointer.String()
+}
+
+// IsRoot returns true if this reference is a root document
+func (r *Ref) IsRoot() bool {
+ return r.referenceURL != nil &&
+ !r.IsCanonical() &&
+ !r.HasURLPathOnly &&
+ r.referenceURL.Fragment == ""
+}
+
+// IsCanonical returns true when this pointer starts with http(s):// or file://
+func (r *Ref) IsCanonical() bool {
+ return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
+}
+
+// "Constructor", parses the given string JSON reference
+func (r *Ref) parse(jsonReferenceString string) error {
+
+ parsed, err := url.Parse(jsonReferenceString)
+ if err != nil {
+ return err
+ }
+
+ internal.NormalizeURL(parsed)
+
+ r.referenceURL = parsed
+ refURL := r.referenceURL
+
+ if refURL.Scheme != "" && refURL.Host != "" {
+ r.HasFullURL = true
+ } else {
+ if refURL.Path != "" {
+ r.HasURLPathOnly = true
+ } else if refURL.RawQuery == "" && refURL.Fragment != "" {
+ r.HasFragmentOnly = true
+ }
+ }
+
+ r.HasFileScheme = refURL.Scheme == "file"
+ r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")
+
+ // invalid json-pointer error means url has no json-pointer fragment. simply ignore error
+ r.referencePointer, _ = jsonpointer.New(refURL.Fragment)
+
+ return nil
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ childURL := child.GetURL()
+ parentURL := r.GetURL()
+ if childURL == nil {
+ return nil, errors.New("child url is nil")
+ }
+ if parentURL == nil {
+ return &child, nil
+ }
+
+ ref, err := New(parentURL.ResolveReference(childURL).String())
+ if err != nil {
+ return nil, err
+ }
+ return &ref, nil
+}
diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore
new file mode 100644
index 00000000..e4f15f17
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/.gitignore
@@ -0,0 +1,4 @@
+secrets.yml
+coverage.out
+profile.cov
+profile.out
diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml
new file mode 100644
index 00000000..cd4a7c33
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/.travis.yml
@@ -0,0 +1,25 @@
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+go:
+- 1.16.x
+- 1.x
+install:
+- go get gotest.tools/gotestsum
+language: go
+arch:
+- amd64
+- ppc64le
+jobs:
+ include:
+ # include linting job, but only for latest go version and amd64 arch
+ - go: 1.x
+ arch: amd64
+ install:
+ go get github.com/golangci/golangci-lint/cmd/golangci-lint
+ script:
+ - golangci-lint run --new-from-rev master
+notifications:
+ slack:
+ secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM=
+script:
+- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md
new file mode 100644
index 00000000..f8bd440d
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/README.md
@@ -0,0 +1,6 @@
+# Loads OAI specs [](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/loads)
+
+[](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [](http://godoc.org/github.com/go-openapi/loads)
+[](https://goreportcard.com/report/github.com/go-openapi/loads)
+
+Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents.
diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go
new file mode 100644
index 00000000..5bcaef5d
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package loads provides document loading methods for swagger (OAI) specifications.
+//
+// It is used by other go-openapi packages to load and run analysis on local or remote spec documents.
+package loads
diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go
new file mode 100644
index 00000000..b2d1e034
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/loaders.go
@@ -0,0 +1,133 @@
+package loads
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+var (
+ // Default chain of loaders, defined at the package level.
+ //
+ // By default this matches json and yaml documents.
+ //
+ // May be altered with AddLoader().
+ loaders *loader
+)
+
+func init() {
+ jsonLoader := &loader{
+ DocLoaderWithMatch: DocLoaderWithMatch{
+ Match: func(_ string) bool {
+ return true
+ },
+ Fn: JSONDoc,
+ },
+ }
+
+ loaders = jsonLoader.WithHead(&loader{
+ DocLoaderWithMatch: DocLoaderWithMatch{
+ Match: swag.YAMLMatcher,
+ Fn: swag.YAMLDoc,
+ },
+ })
+
+ // sets the global default loader for go-openapi/spec
+ spec.PathLoader = loaders.Load
+}
+
+// DocLoader represents a doc loader type
+type DocLoader func(string) (json.RawMessage, error)
+
+// DocMatcher represents a predicate to check if a loader matches
+type DocMatcher func(string) bool
+
+// DocLoaderWithMatch describes a loading function for a given extension match.
+type DocLoaderWithMatch struct {
+ Fn DocLoader
+ Match DocMatcher
+}
+
+// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options
+func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch {
+ return DocLoaderWithMatch{
+ Fn: fn,
+ Match: matcher,
+ }
+}
+
+type loader struct {
+ DocLoaderWithMatch
+ Next *loader
+}
+
+// WithHead adds a loader at the head of the current stack
+func (l *loader) WithHead(head *loader) *loader {
+ if head == nil {
+ return l
+ }
+ head.Next = l
+ return head
+}
+
+// WithNext adds a loader at the trail of the current stack
+func (l *loader) WithNext(next *loader) *loader {
+ l.Next = next
+ return next
+}
+
+// Load the raw document from path
+func (l *loader) Load(path string) (json.RawMessage, error) {
+ _, erp := url.Parse(path)
+ if erp != nil {
+ return nil, erp
+ }
+
+ lastErr := errors.New("no loader matched") // default error if no match was found
+ for ldr := l; ldr != nil; ldr = ldr.Next {
+ if ldr.Match != nil && !ldr.Match(path) {
+ continue
+ }
+
+ // try then move to next one if there is an error
+ b, err := ldr.Fn(path)
+ if err == nil {
+ return b, nil
+ }
+
+ lastErr = err
+ }
+
+ return nil, lastErr
+}
+
+// JSONDoc loads a json document from either a file or a remote url
+func JSONDoc(path string) (json.RawMessage, error) {
+ data, err := swag.LoadFromFileOrHTTP(path)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(data), nil
+}
+
+// AddLoader for a document, executed before other previously set loaders.
+//
+// This sets the configuration at the package level.
+//
+// NOTE:
+// - this updates the default loader used by github.com/go-openapi/spec
+// - since this sets package level globals, you shouln't call this concurrently
+func AddLoader(predicate DocMatcher, load DocLoader) {
+ loaders = loaders.WithHead(&loader{
+ DocLoaderWithMatch: DocLoaderWithMatch{
+ Match: predicate,
+ Fn: load,
+ },
+ })
+
+ // sets the global default loader for go-openapi/spec
+ spec.PathLoader = loaders.Load
+}
diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go
new file mode 100644
index 00000000..f8305d56
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/options.go
@@ -0,0 +1,61 @@
+package loads
+
+type options struct {
+ loader *loader
+}
+
+func defaultOptions() *options {
+ return &options{
+ loader: loaders,
+ }
+}
+
+func loaderFromOptions(options []LoaderOption) *loader {
+ opts := defaultOptions()
+ for _, apply := range options {
+ apply(opts)
+ }
+
+ return opts.loader
+}
+
+// LoaderOption allows to fine-tune the spec loader behavior
+type LoaderOption func(*options)
+
+// WithDocLoader sets a custom loader for loading specs
+func WithDocLoader(l DocLoader) LoaderOption {
+ return func(opt *options) {
+ if l == nil {
+ return
+ }
+ opt.loader = &loader{
+ DocLoaderWithMatch: DocLoaderWithMatch{
+ Fn: l,
+ },
+ }
+ }
+}
+
+// WithDocLoaderMatches sets a chain of custom loaders for loading specs
+// for different extension matches.
+//
+// Loaders are executed in the order of provided DocLoaderWithMatch'es.
+func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption {
+ return func(opt *options) {
+ var final, prev *loader
+ for _, ldr := range l {
+ if ldr.Fn == nil {
+ continue
+ }
+
+ if prev == nil {
+ final = &loader{DocLoaderWithMatch: ldr}
+ prev = final
+ continue
+ }
+
+ prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr})
+ }
+ opt.loader = final
+ }
+}
diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go
new file mode 100644
index 00000000..c9039cd5
--- /dev/null
+++ b/vendor/github.com/go-openapi/loads/spec.go
@@ -0,0 +1,275 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package loads
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "fmt"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/swag"
+)
+
+func init() {
+ gob.Register(map[string]interface{}{})
+ gob.Register([]interface{}{})
+}
+
+// Document represents a swagger spec document
+type Document struct {
+ // specAnalyzer
+ Analyzer *analysis.Spec
+ spec *spec.Swagger
+ specFilePath string
+ origSpec *spec.Swagger
+ schema *spec.Schema
+ pathLoader *loader
+ raw json.RawMessage
+}
+
+// JSONSpec loads a spec from a json document
+func JSONSpec(path string, options ...LoaderOption) (*Document, error) {
+ data, err := JSONDoc(path)
+ if err != nil {
+ return nil, err
+ }
+ // convert to json
+ doc, err := Analyzed(data, "", options...)
+ if err != nil {
+ return nil, err
+ }
+
+ doc.specFilePath = path
+
+ return doc, nil
+}
+
+// Embedded returns a Document based on embedded specs. No analysis is required
+func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) {
+ var origSpec, flatSpec spec.Swagger
+ if err := json.Unmarshal(orig, &origSpec); err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(flat, &flatSpec); err != nil {
+ return nil, err
+ }
+ return &Document{
+ raw: orig,
+ origSpec: &origSpec,
+ spec: &flatSpec,
+ pathLoader: loaderFromOptions(options),
+ }, nil
+}
+
+// Spec loads a new spec document from a local or remote path
+func Spec(path string, options ...LoaderOption) (*Document, error) {
+ ldr := loaderFromOptions(options)
+
+ b, err := ldr.Load(path)
+ if err != nil {
+ return nil, err
+ }
+
+ document, err := Analyzed(b, "", options...)
+ if err != nil {
+ return nil, err
+ }
+
+ document.specFilePath = path
+ document.pathLoader = ldr
+
+ return document, nil
+}
+
+// Analyzed creates a new analyzed spec document for a root json.RawMessage.
+func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) {
+ if version == "" {
+ version = "2.0"
+ }
+ if version != "2.0" {
+ return nil, fmt.Errorf("spec version %q is not supported", version)
+ }
+
+ raw, err := trimData(data) // trim blanks, then convert yaml docs into json
+ if err != nil {
+ return nil, err
+ }
+
+ swspec := new(spec.Swagger)
+ if err = json.Unmarshal(raw, swspec); err != nil {
+ return nil, err
+ }
+
+ origsqspec, err := cloneSpec(swspec)
+ if err != nil {
+ return nil, err
+ }
+
+ d := &Document{
+ Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc
+ schema: spec.MustLoadSwagger20Schema(),
+ spec: swspec,
+ raw: raw,
+ origSpec: origsqspec,
+ pathLoader: loaderFromOptions(options),
+ }
+
+ return d, nil
+}
+
+func trimData(in json.RawMessage) (json.RawMessage, error) {
+ trimmed := bytes.TrimSpace(in)
+ if len(trimmed) == 0 {
+ return in, nil
+ }
+
+ if trimmed[0] == '{' || trimmed[0] == '[' {
+ return trimmed, nil
+ }
+
+ // assume yaml doc: convert it to json
+ yml, err := swag.BytesToYAMLDoc(trimmed)
+ if err != nil {
+ return nil, fmt.Errorf("analyzed: %v", err)
+ }
+
+ d, err := swag.YAMLToJSON(yml)
+ if err != nil {
+ return nil, fmt.Errorf("analyzed: %v", err)
+ }
+
+ return d, nil
+}
+
+// Expanded expands the $ref fields in the spec document and returns a new spec document
+func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
+ swspec := new(spec.Swagger)
+ if err := json.Unmarshal(d.raw, swspec); err != nil {
+ return nil, err
+ }
+
+ var expandOptions *spec.ExpandOptions
+ if len(options) > 0 {
+ expandOptions = options[0]
+ if expandOptions.RelativeBase == "" {
+ expandOptions.RelativeBase = d.specFilePath
+ }
+ } else {
+ expandOptions = &spec.ExpandOptions{
+ RelativeBase: d.specFilePath,
+ }
+ }
+
+ if expandOptions.PathLoader == nil {
+ if d.pathLoader != nil {
+ // use loader from Document options
+ expandOptions.PathLoader = d.pathLoader.Load
+ } else {
+ // use package level loader
+ expandOptions.PathLoader = loaders.Load
+ }
+ }
+
+ if err := spec.ExpandSpec(swspec, expandOptions); err != nil {
+ return nil, err
+ }
+
+ dd := &Document{
+ Analyzer: analysis.New(swspec),
+ spec: swspec,
+ specFilePath: d.specFilePath,
+ schema: spec.MustLoadSwagger20Schema(),
+ raw: d.raw,
+ origSpec: d.origSpec,
+ }
+ return dd, nil
+}
+
+// BasePath the base path for the API specified by this spec
+func (d *Document) BasePath() string {
+ return d.spec.BasePath
+}
+
+// Version returns the version of this spec
+func (d *Document) Version() string {
+ return d.spec.Swagger
+}
+
+// Schema returns the swagger 2.0 schema
+func (d *Document) Schema() *spec.Schema {
+ return d.schema
+}
+
+// Spec returns the swagger spec object model
+func (d *Document) Spec() *spec.Swagger {
+ return d.spec
+}
+
+// Host returns the host for the API
+func (d *Document) Host() string {
+ return d.spec.Host
+}
+
+// Raw returns the raw swagger spec as json bytes
+func (d *Document) Raw() json.RawMessage {
+ return d.raw
+}
+
+// OrigSpec yields the original spec
+func (d *Document) OrigSpec() *spec.Swagger {
+ return d.origSpec
+}
+
+// ResetDefinitions gives a shallow copy with the models reset to the original spec
+func (d *Document) ResetDefinitions() *Document {
+ defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
+ for k, v := range d.origSpec.Definitions {
+ defs[k] = v
+ }
+
+ d.spec.Definitions = defs
+ return d
+}
+
+// Pristine creates a new pristine document instance based on the input data
+func (d *Document) Pristine() *Document {
+ raw, _ := json.Marshal(d.Spec())
+ dd, _ := Analyzed(raw, d.Version())
+ dd.pathLoader = d.pathLoader
+ dd.specFilePath = d.specFilePath
+
+ return dd
+}
+
+// SpecFilePath returns the file path of the spec if one is defined
+func (d *Document) SpecFilePath() string {
+ return d.specFilePath
+}
+
+func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) {
+ var b bytes.Buffer
+ if err := gob.NewEncoder(&b).Encode(src); err != nil {
+ return nil, err
+ }
+
+ var dst spec.Swagger
+ if err := gob.NewDecoder(&b).Decode(&dst); err != nil {
+ return nil, err
+ }
+ return &dst, nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes
new file mode 100644
index 00000000..d207b180
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore
new file mode 100644
index 00000000..fea8b84e
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/.gitignore
@@ -0,0 +1,5 @@
+secrets.yml
+coverage.out
+*.cov
+*.out
+playground
diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml
new file mode 100644
index 00000000..1c75557b
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/.golangci.yml
@@ -0,0 +1,62 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - nilerr # nilerr crashes on this repo
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md
new file mode 100644
index 00000000..b07e0ad9
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/README.md
@@ -0,0 +1,10 @@
+# runtime [](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/runtime)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/runtime)
+[](https://goreportcard.com/report/github.com/go-openapi/runtime)
+
+# go OpenAPI toolkit runtime
+
+The runtime component for use in code generation or as untyped usage.
diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go
new file mode 100644
index 00000000..f8fb4822
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/bytestream.go
@@ -0,0 +1,222 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/go-openapi/swag"
+)
+
+func defaultCloser() error { return nil }
+
+type byteStreamOpt func(opts *byteStreamOpts)
+
+// ClosesStream when the bytestream consumer or producer is finished
+func ClosesStream(opts *byteStreamOpts) {
+ opts.Close = true
+}
+
+type byteStreamOpts struct {
+ Close bool
+}
+
+// ByteStreamConsumer creates a consumer for byte streams.
+//
+// The consumer consumes from a provided reader into the data passed by reference.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - io.ReaderFrom (for maximum control)
+// - io.Writer (performs io.Copy)
+// - encoding.BinaryUnmarshaler
+// - *string
+// - *[]byte
+func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
+ var vals byteStreamOpts
+ for _, opt := range opts {
+ opt(&vals)
+ }
+
+ return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+ if reader == nil {
+ return errors.New("ByteStreamConsumer requires a reader") // early exit
+ }
+ if data == nil {
+ return errors.New("nil destination for ByteStreamConsumer")
+ }
+
+ closer := defaultCloser
+ if vals.Close {
+ if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+ closer = cl.Close
+ }
+ }
+ defer func() {
+ _ = closer()
+ }()
+
+ if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom {
+ _, err := readerFrom.ReadFrom(reader)
+ return err
+ }
+
+ if writer, isDataWriter := data.(io.Writer); isDataWriter {
+ _, err := io.Copy(writer, reader)
+ return err
+ }
+
+ // buffers input before writing to data
+ var buf bytes.Buffer
+ _, err := buf.ReadFrom(reader)
+ if err != nil {
+ return err
+ }
+ b := buf.Bytes()
+
+ switch destinationPointer := data.(type) {
+ case encoding.BinaryUnmarshaler:
+ return destinationPointer.UnmarshalBinary(b)
+ case *any:
+ switch (*destinationPointer).(type) {
+ case string:
+ *destinationPointer = string(b)
+
+ return nil
+
+ case []byte:
+ *destinationPointer = b
+
+ return nil
+ }
+ default:
+ // check for the underlying type to be pointer to []byte or string,
+ if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+ return errors.New("destination must be a pointer")
+ }
+
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ v.SetBytes(b)
+ return nil
+
+ case t.Kind() == reflect.String:
+ v.SetString(string(b))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s",
+ data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface")
+ })
+}
+
+// ByteStreamProducer creates a producer for byte streams.
+//
+// The producer takes input data then writes to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - io.WriterTo (for maximum control)
+// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting.
+// - encoding.BinaryMarshaler
+// - error (writes as a string)
+// - []byte
+// - string
+// - struct, other slices: writes as JSON
+func ByteStreamProducer(opts ...byteStreamOpt) Producer {
+ var vals byteStreamOpts
+ for _, opt := range opts {
+ opt(&vals)
+ }
+
+ return ProducerFunc(func(writer io.Writer, data interface{}) error {
+ if writer == nil {
+ return errors.New("ByteStreamProducer requires a writer") // early exit
+ }
+ if data == nil {
+ return errors.New("nil data for ByteStreamProducer")
+ }
+
+ closer := defaultCloser
+ if vals.Close {
+ if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+ closer = cl.Close
+ }
+ }
+ defer func() {
+ _ = closer()
+ }()
+
+ if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
+ defer rc.Close()
+ }
+
+ switch origin := data.(type) {
+ case io.WriterTo:
+ _, err := origin.WriteTo(writer)
+ return err
+
+ case io.Reader:
+ _, err := io.Copy(writer, origin)
+ return err
+
+ case encoding.BinaryMarshaler:
+ bytes, err := origin.MarshalBinary()
+ if err != nil {
+ return err
+ }
+
+ _, err = writer.Write(bytes)
+ return err
+
+ case error:
+ _, err := writer.Write([]byte(origin.Error()))
+ return err
+
+ default:
+ v := reflect.Indirect(reflect.ValueOf(data))
+ t := v.Type()
+
+ switch {
+ case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+ _, err := writer.Write(v.Bytes())
+ return err
+
+ case t.Kind() == reflect.String:
+ _, err := writer.Write([]byte(v.String()))
+ return err
+
+ case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice:
+ b, err := swag.WriteJSON(data)
+ if err != nil {
+ return err
+ }
+
+ _, err = writer.Write(b)
+ return err
+ }
+ }
+
+ return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s",
+ data, data, "can be resolved by supporting Reader/BinaryMarshaler interface")
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go
new file mode 100644
index 00000000..4f26e923
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/auth_info.go
@@ -0,0 +1,77 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "encoding/base64"
+
+ "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/runtime"
+)
+
+// PassThroughAuth never manipulates the request
+var PassThroughAuth runtime.ClientAuthInfoWriter
+
+func init() {
+ PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil })
+}
+
+// BasicAuth provides a basic auth info writer
+func BasicAuth(username, password string) runtime.ClientAuthInfoWriter {
+ return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+ encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+ return r.SetHeaderParam(runtime.HeaderAuthorization, "Basic "+encoded)
+ })
+}
+
+// APIKeyAuth provides an API key auth info writer
+func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter {
+ if in == "query" {
+ return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+ return r.SetQueryParam(name, value)
+ })
+ }
+
+ if in == "header" {
+ return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+ return r.SetHeaderParam(name, value)
+ })
+ }
+ return nil
+}
+
+// BearerToken provides a header based oauth2 bearer access token auth info writer
+func BearerToken(token string) runtime.ClientAuthInfoWriter {
+ return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+ return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token)
+ })
+}
+
+// Compose combines multiple ClientAuthInfoWriters into a single one.
+// Useful when multiple auth headers are needed.
+func Compose(auths ...runtime.ClientAuthInfoWriter) runtime.ClientAuthInfoWriter {
+ return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
+ for _, auth := range auths {
+ if auth == nil {
+ continue
+ }
+ if err := auth.AuthenticateRequest(r, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go
new file mode 100644
index 00000000..7dd6b51c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "sync/atomic"
+)
+
+// KeepAliveTransport drains the remaining body from a response
+// so that go will reuse the TCP connections.
+// This is not enabled by default because there are servers where
+// the response never gets closed and that would make the code hang forever.
+// So instead it's provided as a http client middleware that can be used to override
+// any request.
+func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper {
+ return &keepAliveTransport{wrapped: rt}
+}
+
+type keepAliveTransport struct {
+ wrapped http.RoundTripper
+}
+
+func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ resp, err := k.wrapped.RoundTrip(r)
+ if err != nil {
+ return resp, err
+ }
+ resp.Body = &drainingReadCloser{rdr: resp.Body}
+ return resp, nil
+}
+
+type drainingReadCloser struct {
+ rdr io.ReadCloser
+ seenEOF uint32
+}
+
+func (d *drainingReadCloser) Read(p []byte) (n int, err error) {
+ n, err = d.rdr.Read(p)
+ if err == io.EOF || n == 0 {
+ atomic.StoreUint32(&d.seenEOF, 1)
+ }
+ return
+}
+
+func (d *drainingReadCloser) Close() error {
+ // drain buffer
+ if atomic.LoadUint32(&d.seenEOF) != 1 {
+ // If the reader side (a HTTP server) is misbehaving, it still may send
+ // some bytes, but the closer ignores them to keep the underling
+ // connection open.
+ _, _ = io.Copy(io.Discard, d.rdr)
+ }
+ return d.rdr.Close()
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go
new file mode 100644
index 00000000..256cd1b4
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go
@@ -0,0 +1,211 @@
+package client
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ instrumentationVersion = "1.0.0"
+ tracerName = "go-openapi"
+)
+
+type config struct {
+ Tracer trace.Tracer
+ Propagator propagation.TextMapPropagator
+ SpanStartOptions []trace.SpanStartOption
+ SpanNameFormatter func(*runtime.ClientOperation) string
+ TracerProvider trace.TracerProvider
+}
+
+type OpenTelemetryOpt interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) OpenTelemetryOpt {
+ return optionFunc(func(c *config) {
+ if provider != nil {
+ c.TracerProvider = provider
+ }
+ })
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) OpenTelemetryOpt {
+ return optionFunc(func(c *config) {
+ if ps != nil {
+ c.Propagator = ps
+ }
+ })
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) OpenTelemetryOpt {
+ return optionFunc(func(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+ })
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(op *runtime.ClientOperation) string) OpenTelemetryOpt {
+ return optionFunc(func(c *config) {
+ c.SpanNameFormatter = f
+ })
+}
+
+func defaultTransportFormatter(op *runtime.ClientOperation) string {
+ if op.ID != "" {
+ return op.ID
+ }
+
+ return fmt.Sprintf("%s_%s", strings.ToLower(op.Method), op.PathPattern)
+}
+
+type openTelemetryTransport struct {
+ transport runtime.ClientTransport
+ host string
+ tracer trace.Tracer
+ config *config
+}
+
+func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, opts []OpenTelemetryOpt) *openTelemetryTransport {
+ tr := &openTelemetryTransport{
+ transport: transport,
+ host: host,
+ }
+
+ defaultOpts := []OpenTelemetryOpt{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ WithPropagators(otel.GetTextMapPropagator()),
+ WithTracerProvider(otel.GetTracerProvider()),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ tr.config = c
+
+ return tr
+}
+
+func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (interface{}, error) {
+ if op.Context == nil {
+ return t.transport.Submit(op)
+ }
+
+ params := op.Params
+ reader := op.Reader
+
+ var span trace.Span
+ defer func() {
+ if span != nil {
+ span.End()
+ }
+ }()
+
+ op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
+ span = t.newOpenTelemetrySpan(op, req.GetHeaderParams())
+ return params.WriteToRequest(req, reg)
+ })
+
+ op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ if span != nil {
+ statusCode := response.Code()
+ // NOTE: this is replaced by semconv.HTTPResponseStatusCode in semconv v1.21
+ span.SetAttributes(semconv.HTTPStatusCode(statusCode))
+ // NOTE: the conversion from HTTP status code to trace code is no longer available with
+ // semconv v1.21
+ span.SetStatus(httpconv.ServerStatus(statusCode))
+ }
+
+ return reader.ReadResponse(response, consumer)
+ })
+
+ submit, err := t.transport.Submit(op)
+ if err != nil && span != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ }
+
+ return submit, err
+}
+
+func (t *openTelemetryTransport) newOpenTelemetrySpan(op *runtime.ClientOperation, header http.Header) trace.Span {
+ ctx := op.Context
+
+ tracer := t.tracer
+ if tracer == nil {
+ if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ ctx, span := tracer.Start(ctx, t.config.SpanNameFormatter(op), t.config.SpanStartOptions...)
+
+ var scheme string
+ if len(op.Schemes) > 0 {
+ scheme = op.Schemes[0]
+ }
+
+ span.SetAttributes(
+ attribute.String("net.peer.name", t.host),
+ attribute.String(string(semconv.HTTPRouteKey), op.PathPattern),
+ attribute.String(string(semconv.HTTPMethodKey), op.Method),
+ attribute.String("span.kind", trace.SpanKindClient.String()),
+ attribute.String("http.scheme", scheme),
+ )
+
+ carrier := propagation.HeaderCarrier(header)
+ t.config.Propagator.Inject(ctx, carrier)
+
+ return span
+}
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+ return tp.Tracer(tracerName, trace.WithInstrumentationVersion(version()))
+}
+
+func newConfig(opts ...OpenTelemetryOpt) *config {
+ c := &config{
+ Propagator: otel.GetTextMapPropagator(),
+ }
+
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+
+ // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
+ if c.TracerProvider != nil {
+ c.Tracer = newTracer(c.TracerProvider)
+ }
+
+ return c
+}
+
+// Version is the current release version of the go-runtime instrumentation.
+func version() string {
+ return instrumentationVersion
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/opentracing.go b/vendor/github.com/go-openapi/runtime/client/opentracing.go
new file mode 100644
index 00000000..627286d1
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/opentracing.go
@@ -0,0 +1,99 @@
+package client
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+
+ "github.com/go-openapi/runtime"
+)
+
+type tracingTransport struct {
+ transport runtime.ClientTransport
+ host string
+ opts []opentracing.StartSpanOption
+}
+
+func newOpenTracingTransport(transport runtime.ClientTransport, host string, opts []opentracing.StartSpanOption,
+) runtime.ClientTransport {
+ return &tracingTransport{
+ transport: transport,
+ host: host,
+ opts: opts,
+ }
+}
+
+func (t *tracingTransport) Submit(op *runtime.ClientOperation) (interface{}, error) {
+ if op.Context == nil {
+ return t.transport.Submit(op)
+ }
+
+ params := op.Params
+ reader := op.Reader
+
+ var span opentracing.Span
+ defer func() {
+ if span != nil {
+ span.Finish()
+ }
+ }()
+
+ op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
+ span = createClientSpan(op, req.GetHeaderParams(), t.host, t.opts)
+ return params.WriteToRequest(req, reg)
+ })
+
+ op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ if span != nil {
+ code := response.Code()
+ ext.HTTPStatusCode.Set(span, uint16(code))
+ if code >= 400 {
+ ext.Error.Set(span, true)
+ }
+ }
+ return reader.ReadResponse(response, consumer)
+ })
+
+ submit, err := t.transport.Submit(op)
+ if err != nil && span != nil {
+ ext.Error.Set(span, true)
+ span.LogFields(log.Error(err))
+ }
+ return submit, err
+}
+
+func createClientSpan(op *runtime.ClientOperation, header http.Header, host string,
+ opts []opentracing.StartSpanOption) opentracing.Span {
+ ctx := op.Context
+ span := opentracing.SpanFromContext(ctx)
+
+ if span != nil {
+ opts = append(opts, ext.SpanKindRPCClient)
+ span, _ = opentracing.StartSpanFromContextWithTracer(
+ ctx, span.Tracer(), operationName(op), opts...)
+
+ ext.Component.Set(span, "go-openapi")
+ ext.PeerHostname.Set(span, host)
+ span.SetTag("http.path", op.PathPattern)
+ ext.HTTPMethod.Set(span, op.Method)
+
+ _ = span.Tracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(header))
+
+ return span
+ }
+ return nil
+}
+
+func operationName(op *runtime.ClientOperation) string {
+ if op.ID != "" {
+ return op.ID
+ }
+ return fmt.Sprintf("%s_%s", op.Method, op.PathPattern)
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go
new file mode 100644
index 00000000..c4a891d0
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/request.go
@@ -0,0 +1,482 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/runtime"
+)
+
+// NewRequest creates a new swagger http client request
+func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request {
+ return &request{
+ pathPattern: pathPattern,
+ method: method,
+ writer: writer,
+ header: make(http.Header),
+ query: make(url.Values),
+ timeout: DefaultTimeout,
+ getBody: getRequestBuffer,
+ }
+}
+
+// Request represents a swagger client request.
+//
+// This Request struct converts to a HTTP request.
+// There might be others that convert to other transports.
+// There is no error checking here, it is assumed to be used after a spec has been validated.
+// so impossible combinations should not arise (hopefully).
+//
+// The main purpose of this struct is to hide the machinery of adding params to a transport request.
+// The generated code only implements what is necessary to turn a param into a valid value for these methods.
+type request struct {
+ pathPattern string
+ method string
+ writer runtime.ClientRequestWriter
+
+ pathParams map[string]string
+ header http.Header
+ query url.Values
+ formFields url.Values
+ fileFields map[string][]runtime.NamedReadCloser
+ payload interface{}
+ timeout time.Duration
+ buf *bytes.Buffer
+
+ getBody func(r *request) []byte
+}
+
+var (
+ // ensure interface compliance
+ _ runtime.ClientRequest = new(request)
+)
+
+func (r *request) isMultipart(mediaType string) bool {
+ if len(r.fileFields) > 0 {
+ return true
+ }
+
+ return runtime.MultipartFormMime == mediaType
+}
+
+// BuildHTTP creates a new http request based on the data from the params
+func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) {
+ return r.buildHTTP(mediaType, basePath, producers, registry, nil)
+}
+func escapeQuotes(s string) string {
+ return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s)
+}
+
+func logClose(err error, pw *io.PipeWriter) {
+ log.Println(err)
+ closeErr := pw.CloseWithError(err)
+ if closeErr != nil {
+ log.Println(closeErr)
+ }
+}
+
+func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { //nolint:gocyclo,maintidx
+ // build the data
+ if err := r.writer.WriteToRequest(r, registry); err != nil {
+ return nil, err
+ }
+
+ // Our body must be an io.Reader.
+ // When we create the http.Request, if we pass it a
+ // bytes.Buffer then it will wrap it in an io.ReadCloser
+ // and set the content length automatically.
+ var body io.Reader
+ var pr *io.PipeReader
+ var pw *io.PipeWriter
+
+ r.buf = bytes.NewBuffer(nil)
+ if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 {
+ body = r.buf
+ if r.isMultipart(mediaType) {
+ pr, pw = io.Pipe()
+ body = pr
+ }
+ }
+
+ // check if this is a form type request
+ if len(r.formFields) > 0 || len(r.fileFields) > 0 {
+ if !r.isMultipart(mediaType) {
+ r.header.Set(runtime.HeaderContentType, mediaType)
+ formString := r.formFields.Encode()
+ r.buf.WriteString(formString)
+ goto DoneChoosingBodySource
+ }
+
+ mp := multipart.NewWriter(pw)
+ r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary()))
+
+ go func() {
+ defer func() {
+ mp.Close()
+ pw.Close()
+ }()
+
+ for fn, v := range r.formFields {
+ for _, vi := range v {
+ if err := mp.WriteField(fn, vi); err != nil {
+ logClose(err, pw)
+ return
+ }
+ }
+ }
+
+ defer func() {
+ for _, ff := range r.fileFields {
+ for _, ffi := range ff {
+ ffi.Close()
+ }
+ }
+ }()
+ for fn, f := range r.fileFields {
+ for _, fi := range f {
+ var fileContentType string
+ if p, ok := fi.(interface {
+ ContentType() string
+ }); ok {
+ fileContentType = p.ContentType()
+ } else {
+ // Need to read the data so that we can detect the content type
+ buf := make([]byte, 512)
+ size, err := fi.Read(buf)
+ if err != nil && err != io.EOF {
+ logClose(err, pw)
+ return
+ }
+ fileContentType = http.DetectContentType(buf)
+ fi = runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi))
+ }
+
+ // Create the MIME headers for the new part
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Disposition",
+ fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name()))))
+ h.Set("Content-Type", fileContentType)
+
+ wrtr, err := mp.CreatePart(h)
+ if err != nil {
+ logClose(err, pw)
+ return
+ }
+ if _, err := io.Copy(wrtr, fi); err != nil {
+ logClose(err, pw)
+ }
+ }
+ }
+ }()
+
+ goto DoneChoosingBodySource
+ }
+
+ // if there is payload, use the producer to write the payload, and then
+ // set the header to the content-type appropriate for the payload produced
+ if r.payload != nil {
+ // TODO: infer most appropriate content type based on the producer used,
+ // and the `consumers` section of the spec/operation
+ r.header.Set(runtime.HeaderContentType, mediaType)
+ if rdr, ok := r.payload.(io.ReadCloser); ok {
+ body = rdr
+ goto DoneChoosingBodySource
+ }
+
+ if rdr, ok := r.payload.(io.Reader); ok {
+ body = rdr
+ goto DoneChoosingBodySource
+ }
+
+ producer := producers[mediaType]
+ if err := producer.Produce(r.buf, r.payload); err != nil {
+ return nil, err
+ }
+ }
+
+DoneChoosingBodySource:
+
+ if runtime.CanHaveBody(r.method) && body != nil && r.header.Get(runtime.HeaderContentType) == "" {
+ r.header.Set(runtime.HeaderContentType, mediaType)
+ }
+
+ if auth != nil {
+ // If we're not using r.buf as our http.Request's body,
+ // either the payload is an io.Reader or io.ReadCloser,
+ // or we're doing a multipart form/file.
+ //
+ // In those cases, if the AuthenticateRequest call asks for the body,
+ // we must read it into a buffer and provide that, then use that buffer
+ // as the body of our http.Request.
+ //
+ // This is done in-line with the GetBody() request rather than ahead
+ // of time, because there's no way to know if the AuthenticateRequest
+ // will even ask for the body of the request.
+ //
+ // If for some reason the copy fails, there's no way to return that
+ // error to the GetBody() call, so return it afterwards.
+ //
+ // An error from the copy action is prioritized over any error
+ // from the AuthenticateRequest call, because the mis-read
+ // body may have interfered with the auth.
+ //
+ var copyErr error
+ if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) {
+ var copied bool
+ r.getBody = func(r *request) []byte {
+ if copied {
+ return getRequestBuffer(r)
+ }
+
+ defer func() {
+ copied = true
+ }()
+
+ if _, copyErr = io.Copy(r.buf, body); copyErr != nil {
+ return nil
+ }
+
+ if closer, ok := body.(io.ReadCloser); ok {
+ if copyErr = closer.Close(); copyErr != nil {
+ return nil
+ }
+ }
+
+ body = r.buf
+ return getRequestBuffer(r)
+ }
+ }
+
+ authErr := auth.AuthenticateRequest(r, registry)
+
+ if copyErr != nil {
+ return nil, fmt.Errorf("error retrieving the response body: %v", copyErr)
+ }
+
+ if authErr != nil {
+ return nil, authErr
+ }
+ }
+
+ // In case the basePath or the request pathPattern include static query parameters,
+ // parse those out before constructing the final path. The parameters themselves
+ // will be merged with the ones set by the client, with the priority given first to
+ // the ones set by the client, then the path pattern, and lastly the base path.
+ basePathURL, err := url.Parse(basePath)
+ if err != nil {
+ return nil, err
+ }
+ staticQueryParams := basePathURL.Query()
+
+ pathPatternURL, err := url.Parse(r.pathPattern)
+ if err != nil {
+ return nil, err
+ }
+ for name, values := range pathPatternURL.Query() {
+ if _, present := staticQueryParams[name]; present {
+ staticQueryParams.Del(name)
+ }
+ for _, value := range values {
+ staticQueryParams.Add(name, value)
+ }
+ }
+
+ // create http request
+ var reinstateSlash bool
+ if pathPatternURL.Path != "" && pathPatternURL.Path != "/" && pathPatternURL.Path[len(pathPatternURL.Path)-1] == '/' {
+ reinstateSlash = true
+ }
+
+ urlPath := path.Join(basePathURL.Path, pathPatternURL.Path)
+ for k, v := range r.pathParams {
+ urlPath = strings.ReplaceAll(urlPath, "{"+k+"}", url.PathEscape(v))
+ }
+ if reinstateSlash {
+ urlPath += "/"
+ }
+
+ req, err := http.NewRequestWithContext(context.Background(), r.method, urlPath, body)
+ if err != nil {
+ return nil, err
+ }
+
+ originalParams := r.GetQueryParams()
+
+ // Merge the query parameters extracted from the basePath with the ones set by
+ // the client in this struct. In case of conflict, the client wins.
+ for k, v := range staticQueryParams {
+ _, present := originalParams[k]
+ if !present {
+ if err = r.SetQueryParam(k, v...); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ req.URL.RawQuery = r.query.Encode()
+ req.Header = r.header
+
+ return req, nil
+}
+
+func mangleContentType(mediaType, boundary string) string {
+ if strings.ToLower(mediaType) == runtime.URLencodedFormMime {
+ return fmt.Sprintf("%s; boundary=%s", mediaType, boundary)
+ }
+ return "multipart/form-data; boundary=" + boundary
+}
+
+func (r *request) GetMethod() string {
+ return r.method
+}
+
+func (r *request) GetPath() string {
+ path := r.pathPattern
+ for k, v := range r.pathParams {
+ path = strings.ReplaceAll(path, "{"+k+"}", v)
+ }
+ return path
+}
+
+func (r *request) GetBody() []byte {
+ return r.getBody(r)
+}
+
+func getRequestBuffer(r *request) []byte {
+ if r.buf == nil {
+ return nil
+ }
+ return r.buf.Bytes()
+}
+
+// SetHeaderParam adds a header param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetHeaderParam(name string, values ...string) error {
+ if r.header == nil {
+ r.header = make(http.Header)
+ }
+ r.header[http.CanonicalHeaderKey(name)] = values
+ return nil
+}
+
+// GetHeaderParams returns the all headers currently set for the request
+func (r *request) GetHeaderParams() http.Header {
+ return r.header
+}
+
+// SetQueryParam adds a query param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetQueryParam(name string, values ...string) error {
+ if r.query == nil {
+ r.query = make(url.Values)
+ }
+ r.query[name] = values
+ return nil
+}
+
+// GetQueryParams returns a copy of all query params currently set for the request
+func (r *request) GetQueryParams() url.Values {
+ var result = make(url.Values)
+ for key, value := range r.query {
+ result[key] = append([]string{}, value...)
+ }
+ return result
+}
+
+// SetFormParam adds a forn param to the request
+// when there is only 1 value provided for the varargs, it will set it.
+// when there are several values provided for the varargs it will add it (no overriding)
+func (r *request) SetFormParam(name string, values ...string) error {
+ if r.formFields == nil {
+ r.formFields = make(url.Values)
+ }
+ r.formFields[name] = values
+ return nil
+}
+
+// SetPathParam adds a path param to the request
+func (r *request) SetPathParam(name string, value string) error {
+ if r.pathParams == nil {
+ r.pathParams = make(map[string]string)
+ }
+
+ r.pathParams[name] = value
+ return nil
+}
+
+// SetFileParam adds a file param to the request
+func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error {
+ for _, file := range files {
+ if actualFile, ok := file.(*os.File); ok {
+ fi, err := os.Stat(actualFile.Name())
+ if err != nil {
+ return err
+ }
+ if fi.IsDir() {
+ return fmt.Errorf("%q is a directory, only files are supported", file.Name())
+ }
+ }
+ }
+
+ if r.fileFields == nil {
+ r.fileFields = make(map[string][]runtime.NamedReadCloser)
+ }
+ if r.formFields == nil {
+ r.formFields = make(url.Values)
+ }
+
+ r.fileFields[name] = files
+ return nil
+}
+
+func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser {
+ return r.fileFields
+}
+
+// SetBodyParam sets a body parameter on the request.
+// This does not yet serialze the object, this happens as late as possible.
+func (r *request) SetBodyParam(payload interface{}) error {
+ r.payload = payload
+ return nil
+}
+
+func (r *request) GetBodyParam() interface{} {
+ return r.payload
+}
+
+// SetTimeout sets the timeout for a request
+func (r *request) SetTimeout(timeout time.Duration) error {
+ r.timeout = timeout
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go
new file mode 100644
index 00000000..0bbd388b
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/response.go
@@ -0,0 +1,50 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/go-openapi/runtime"
+)
+
+var _ runtime.ClientResponse = response{}
+
+func newResponse(resp *http.Response) runtime.ClientResponse { return response{resp: resp} }
+
+type response struct {
+ resp *http.Response
+}
+
+func (r response) Code() int {
+ return r.resp.StatusCode
+}
+
+func (r response) Message() string {
+ return r.resp.Status
+}
+
+func (r response) GetHeader(name string) string {
+ return r.resp.Header.Get(name)
+}
+
+func (r response) GetHeaders(name string) []string {
+ return r.resp.Header.Values(name)
+}
+
+func (r response) Body() io.ReadCloser {
+ return r.resp.Body
+}
diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go
new file mode 100644
index 00000000..5bd4d75d
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client/runtime.go
@@ -0,0 +1,552 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "mime"
+ "net/http"
+ "net/http/httputil"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/logger"
+ "github.com/go-openapi/runtime/middleware"
+ "github.com/go-openapi/runtime/yamlpc"
+)
+
+const (
+ schemeHTTP = "http"
+ schemeHTTPS = "https"
+)
+
+// TLSClientOptions to configure client authentication with mutual TLS
+type TLSClientOptions struct {
+ // Certificate is the path to a PEM-encoded certificate to be used for
+ // client authentication. If set then Key must also be set.
+ Certificate string
+
+ // LoadedCertificate is the certificate to be used for client authentication.
+ // This field is ignored if Certificate is set. If this field is set, LoadedKey
+ // is also required.
+ LoadedCertificate *x509.Certificate
+
+ // Key is the path to an unencrypted PEM-encoded private key for client
+ // authentication. This field is required if Certificate is set.
+ Key string
+
+ // LoadedKey is the key for client authentication. This field is required if
+ // LoadedCertificate is set.
+ LoadedKey crypto.PrivateKey
+
+ // CA is a path to a PEM-encoded certificate that specifies the root certificate
+ // to use when validating the TLS certificate presented by the server. If this field
+ // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA
+ // is set.
+ CA string
+
+ // LoadedCA specifies the root certificate to use when validating the server's TLS certificate.
+ // If this field (and CA) is not set, the system certificate pool is used.
+ LoadedCA *x509.Certificate
+
+ // LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate.
+ // If set, it will be combined with the other loaded certificates (see LoadedCA and CA).
+ // If neither LoadedCA nor CA is set, the provided pool will override the system
+ // certificate pool.
+ // The caller must not use the supplied pool after calling TLSClientAuth.
+ LoadedCAPool *x509.CertPool
+
+ // ServerName specifies the hostname to use when verifying the server certificate.
+ // If this field is set then InsecureSkipVerify will be ignored and treated as
+ // false.
+ ServerName string
+
+ // InsecureSkipVerify controls whether the certificate chain and hostname presented
+ // by the server are validated. If true, any certificate is accepted.
+ InsecureSkipVerify bool
+
+ // VerifyPeerCertificate, if not nil, is called after normal
+ // certificate verification. It receives the raw ASN.1 certificates
+ // provided by the peer and also any verified chains that normal processing found.
+ // If it returns a non-nil error, the handshake is aborted and that error results.
+ //
+ // If normal verification fails then the handshake will abort before
+ // considering this callback. If normal verification is disabled by
+ // setting InsecureSkipVerify then this callback will be considered but
+ // the verifiedChains argument will always be nil.
+ VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+ // SessionTicketsDisabled may be set to true to disable session ticket and
+ // PSK (resumption) support. Note that on clients, session ticket support is
+ // also disabled if ClientSessionCache is nil.
+ SessionTicketsDisabled bool
+
+ // ClientSessionCache is a cache of ClientSessionState entries for TLS
+ // session resumption. It is only used by clients.
+ ClientSessionCache tls.ClientSessionCache
+
+ // Prevents callers using unkeyed fields.
+ _ struct{}
+}
+
+// TLSClientAuth creates a tls.Config for mutual auth
+func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
+ // create client tls config
+ cfg := &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
+
+ // load client cert if specified
+ if opts.Certificate != "" {
+ cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key)
+ if err != nil {
+ return nil, fmt.Errorf("tls client cert: %v", err)
+ }
+ cfg.Certificates = []tls.Certificate{cert}
+ } else if opts.LoadedCertificate != nil {
+ block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw}
+ certPem := pem.EncodeToMemory(&block)
+
+ var keyBytes []byte
+ switch k := opts.LoadedKey.(type) {
+ case *rsa.PrivateKey:
+ keyBytes = x509.MarshalPKCS1PrivateKey(k)
+ case *ecdsa.PrivateKey:
+ var err error
+ keyBytes, err = x509.MarshalECPrivateKey(k)
+ if err != nil {
+ return nil, fmt.Errorf("tls client priv key: %v", err)
+ }
+ default:
+ return nil, errors.New("tls client priv key: unsupported key type")
+ }
+
+ block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}
+ keyPem := pem.EncodeToMemory(&block)
+
+ cert, err := tls.X509KeyPair(certPem, keyPem)
+ if err != nil {
+ return nil, fmt.Errorf("tls client cert: %v", err)
+ }
+ cfg.Certificates = []tls.Certificate{cert}
+ }
+
+ cfg.InsecureSkipVerify = opts.InsecureSkipVerify
+
+ cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate
+ cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled
+ cfg.ClientSessionCache = opts.ClientSessionCache
+
+ // When no CA certificate is provided, default to the system cert pool
+ // that way when a request is made to a server known by the system trust store,
+ // the name is still verified
+ switch {
+ case opts.LoadedCA != nil:
+ caCertPool := basePool(opts.LoadedCAPool)
+ caCertPool.AddCert(opts.LoadedCA)
+ cfg.RootCAs = caCertPool
+ case opts.CA != "":
+ // load ca cert
+ caCert, err := os.ReadFile(opts.CA)
+ if err != nil {
+ return nil, fmt.Errorf("tls client ca: %v", err)
+ }
+ caCertPool := basePool(opts.LoadedCAPool)
+ caCertPool.AppendCertsFromPEM(caCert)
+ cfg.RootCAs = caCertPool
+ case opts.LoadedCAPool != nil:
+ cfg.RootCAs = opts.LoadedCAPool
+ }
+
+ // apply servername override
+ if opts.ServerName != "" {
+ cfg.InsecureSkipVerify = false
+ cfg.ServerName = opts.ServerName
+ }
+
+ return cfg, nil
+}
+
+func basePool(pool *x509.CertPool) *x509.CertPool {
+ if pool == nil {
+ return x509.NewCertPool()
+ }
+ return pool
+}
+
+// TLSTransport creates a http client transport suitable for mutual tls auth
+func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) {
+ cfg, err := TLSClientAuth(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return &http.Transport{TLSClientConfig: cfg}, nil
+}
+
+// TLSClient creates a http.Client for mutual auth
+func TLSClient(opts TLSClientOptions) (*http.Client, error) {
+ transport, err := TLSTransport(opts)
+ if err != nil {
+ return nil, err
+ }
+ return &http.Client{Transport: transport}, nil
+}
+
+// DefaultTimeout the default request timeout
+var DefaultTimeout = 30 * time.Second
+
+// Runtime represents an API client that uses the transport
+// to make http requests based on a swagger specification.
+type Runtime struct {
+ DefaultMediaType string
+ DefaultAuthentication runtime.ClientAuthInfoWriter
+ Consumers map[string]runtime.Consumer
+ Producers map[string]runtime.Producer
+
+ Transport http.RoundTripper
+ Jar http.CookieJar
+ // Spec *spec.Document
+ Host string
+ BasePath string
+ Formats strfmt.Registry
+ Context context.Context //nolint:containedctx // we precisely want this type to contain the request context
+
+ Debug bool
+ logger logger.Logger
+
+ clientOnce *sync.Once
+ client *http.Client
+ schemes []string
+ response ClientResponseFunc
+}
+
+// New creates a new default runtime for a swagger api runtime.Client
+func New(host, basePath string, schemes []string) *Runtime {
+ var rt Runtime
+ rt.DefaultMediaType = runtime.JSONMime
+
+ // TODO: actually infer this stuff from the spec
+ rt.Consumers = map[string]runtime.Consumer{
+ runtime.YAMLMime: yamlpc.YAMLConsumer(),
+ runtime.JSONMime: runtime.JSONConsumer(),
+ runtime.XMLMime: runtime.XMLConsumer(),
+ runtime.TextMime: runtime.TextConsumer(),
+ runtime.HTMLMime: runtime.TextConsumer(),
+ runtime.CSVMime: runtime.CSVConsumer(),
+ runtime.DefaultMime: runtime.ByteStreamConsumer(),
+ }
+ rt.Producers = map[string]runtime.Producer{
+ runtime.YAMLMime: yamlpc.YAMLProducer(),
+ runtime.JSONMime: runtime.JSONProducer(),
+ runtime.XMLMime: runtime.XMLProducer(),
+ runtime.TextMime: runtime.TextProducer(),
+ runtime.HTMLMime: runtime.TextProducer(),
+ runtime.CSVMime: runtime.CSVProducer(),
+ runtime.DefaultMime: runtime.ByteStreamProducer(),
+ }
+ rt.Transport = http.DefaultTransport
+ rt.Jar = nil
+ rt.Host = host
+ rt.BasePath = basePath
+ rt.Context = context.Background()
+ rt.clientOnce = new(sync.Once)
+ if !strings.HasPrefix(rt.BasePath, "/") {
+ rt.BasePath = "/" + rt.BasePath
+ }
+
+ rt.Debug = logger.DebugEnabled()
+ rt.logger = logger.StandardLogger{}
+ rt.response = newResponse
+
+ if len(schemes) > 0 {
+ rt.schemes = schemes
+ }
+ return &rt
+}
+
+// NewWithClient allows you to create a new transport with a configured http.Client
+func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime {
+ rt := New(host, basePath, schemes)
+ if client != nil {
+ rt.clientOnce.Do(func() {
+ rt.client = client
+ })
+ }
+ return rt
+}
+
+// WithOpenTracing adds opentracing support to the provided runtime.
+// A new client span is created for each request.
+// If the context of the client operation does not contain an active span, no span is created.
+// The provided opts are applied to each spans - for example to add global tags.
+func (r *Runtime) WithOpenTracing(opts ...opentracing.StartSpanOption) runtime.ClientTransport {
+ return newOpenTracingTransport(r, r.Host, opts)
+}
+
+// WithOpenTelemetry adds opentelemetry support to the provided runtime.
+// A new client span is created for each request.
+// If the context of the client operation does not contain an active span, no span is created.
+// The provided opts are applied to each spans - for example to add global tags.
+func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTransport {
+ return newOpenTelemetryTransport(r, r.Host, opts)
+}
+
+func (r *Runtime) pickScheme(schemes []string) string {
+ if v := r.selectScheme(r.schemes); v != "" {
+ return v
+ }
+ if v := r.selectScheme(schemes); v != "" {
+ return v
+ }
+ return schemeHTTP
+}
+
+func (r *Runtime) selectScheme(schemes []string) string {
+ schLen := len(schemes)
+ if schLen == 0 {
+ return ""
+ }
+
+ scheme := schemes[0]
+ // prefer https, but skip when not possible
+ if scheme != schemeHTTPS && schLen > 1 {
+ for _, sch := range schemes {
+ if sch == schemeHTTPS {
+ scheme = sch
+ break
+ }
+ }
+ }
+ return scheme
+}
+
+func transportOrDefault(left, right http.RoundTripper) http.RoundTripper {
+ if left == nil {
+ return right
+ }
+ return left
+}
+
+// EnableConnectionReuse drains the remaining body from a response
+// so that go will reuse the TCP connections.
+//
+// This is not enabled by default because there are servers where
+// the response never gets closed and that would make the code hang forever.
+// So instead it's provided as a http client middleware that can be used to override
+// any request.
+func (r *Runtime) EnableConnectionReuse() {
+ if r.client == nil {
+ r.Transport = KeepAliveTransport(
+ transportOrDefault(r.Transport, http.DefaultTransport),
+ )
+ return
+ }
+
+ r.client.Transport = KeepAliveTransport(
+ transportOrDefault(r.client.Transport,
+ transportOrDefault(r.Transport, http.DefaultTransport),
+ ),
+ )
+}
+
+// takes a client operation and creates equivalent http.Request
+func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive,stylecheck
+ params, _, auth := operation.Params, operation.Reader, operation.AuthInfo
+
+ request := newRequest(operation.Method, operation.PathPattern, params)
+
+ var accept []string
+ accept = append(accept, operation.ProducesMediaTypes...)
+ if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
+ return nil, nil, err
+ }
+
+ if auth == nil && r.DefaultAuthentication != nil {
+ auth = runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
+ if req.GetHeaderParams().Get(runtime.HeaderAuthorization) != "" {
+ return nil
+ }
+ return r.DefaultAuthentication.AuthenticateRequest(req, reg)
+ })
+ }
+ // if auth != nil {
+ // if err := auth.AuthenticateRequest(request, r.Formats); err != nil {
+ // return nil, err
+ // }
+ //}
+
+ // TODO: pick appropriate media type
+ cmt := r.DefaultMediaType
+ for _, mediaType := range operation.ConsumesMediaTypes {
+ // Pick first non-empty media type
+ if mediaType != "" {
+ cmt = mediaType
+ break
+ }
+ }
+
+ if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime {
+ return nil, nil, fmt.Errorf("none of producers: %v registered. try %s", r.Producers, cmt)
+ }
+
+ req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+ req.URL.Scheme = r.pickScheme(operation.Schemes)
+ req.URL.Host = r.Host
+ req.Host = r.Host
+ return request, req, nil
+}
+
+func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) { //nolint:revive,stylecheck
+ _, req, err = r.createHttpRequest(operation)
+ return
+}
+
+// Submit a request and when there is a body on success it will turn that into the result
+// all other things are turned into an api error for swagger which retains the status code
+func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) {
+ _, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo
+
+ request, req, err := r.createHttpRequest(operation)
+ if err != nil {
+ return nil, err
+ }
+
+ r.clientOnce.Do(func() {
+ r.client = &http.Client{
+ Transport: r.Transport,
+ Jar: r.Jar,
+ }
+ })
+
+ if r.Debug {
+ b, err2 := httputil.DumpRequestOut(req, true)
+ if err2 != nil {
+ return nil, err2
+ }
+ r.logger.Debugf("%s\n", string(b))
+ }
+
+ var parentCtx context.Context
+ switch {
+ case operation.Context != nil:
+ parentCtx = operation.Context
+ case r.Context != nil:
+ parentCtx = r.Context
+ default:
+ parentCtx = context.Background()
+ }
+
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+ if request.timeout == 0 {
+ // There may be a deadline in the context passed to the operation.
+ // Otherwise, there is no timeout set.
+ ctx, cancel = context.WithCancel(parentCtx)
+ } else {
+ // Sets the timeout passed from request params (by default runtime.DefaultTimeout).
+ // If there is already a deadline in the parent context, the shortest will
+ // apply.
+ ctx, cancel = context.WithTimeout(parentCtx, request.timeout)
+ }
+ defer cancel()
+
+ var client *http.Client
+ if operation.Client != nil {
+ client = operation.Client
+ } else {
+ client = r.client
+ }
+ req = req.WithContext(ctx)
+ res, err := client.Do(req) // make requests, by default follows 10 redirects before failing
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+
+ ct := res.Header.Get(runtime.HeaderContentType)
+ if ct == "" { // this should really never occur
+ ct = r.DefaultMediaType
+ }
+
+ if r.Debug {
+ printBody := true
+ if ct == runtime.DefaultMime {
+ printBody = false // Spare the terminal from a binary blob.
+ }
+ b, err2 := httputil.DumpResponse(res, printBody)
+ if err2 != nil {
+ return nil, err2
+ }
+ r.logger.Debugf("%s\n", string(b))
+ }
+
+ mt, _, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return nil, fmt.Errorf("parse content type: %s", err)
+ }
+
+ cons, ok := r.Consumers[mt]
+ if !ok {
+ if cons, ok = r.Consumers["*/*"]; !ok {
+ // scream about not knowing what to do
+ return nil, fmt.Errorf("no consumer: %q", ct)
+ }
+ }
+ return readResponse.ReadResponse(r.response(res), cons)
+}
+
+// SetDebug changes the debug flag.
+// It ensures that client and middlewares have the set debug level.
+func (r *Runtime) SetDebug(debug bool) {
+ r.Debug = debug
+ middleware.Debug = debug
+}
+
+// SetLogger changes the logger stream.
+// It ensures that client and middlewares use the same logger.
+func (r *Runtime) SetLogger(logger logger.Logger) {
+ r.logger = logger
+ middleware.Logger = logger
+}
+
+type ClientResponseFunc = func(*http.Response) runtime.ClientResponse //nolint:revive
+
+// SetResponseReader changes the response reader implementation.
+func (r *Runtime) SetResponseReader(f ClientResponseFunc) {
+ if f == nil {
+ return
+ }
+ r.response = f
+}
diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go
new file mode 100644
index 00000000..c6c97d9a
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_auth_info.go
@@ -0,0 +1,30 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import "github.com/go-openapi/strfmt"
+
+// A ClientAuthInfoWriterFunc converts a function to a request writer interface
+type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error
+
+// AuthenticateRequest adds authentication data to the request
+func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error {
+ return fn(req, reg)
+}
+
+// A ClientAuthInfoWriter implementor knows how to write authentication info to a request
+type ClientAuthInfoWriter interface {
+ AuthenticateRequest(ClientRequest, strfmt.Registry) error
+}
diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go
new file mode 100644
index 00000000..5a5d6356
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_operation.go
@@ -0,0 +1,41 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "context"
+ "net/http"
+)
+
+// ClientOperation represents the context for a swagger operation to be submitted to the transport
+type ClientOperation struct {
+ ID string
+ Method string
+ PathPattern string
+ ProducesMediaTypes []string
+ ConsumesMediaTypes []string
+ Schemes []string
+ AuthInfo ClientAuthInfoWriter
+ Params ClientRequestWriter
+ Reader ClientResponseReader
+ Context context.Context //nolint:containedctx // we precisely want this type to contain the request context
+ Client *http.Client
+}
+
+// A ClientTransport implementor knows how to submit Request objects to some destination
+type ClientTransport interface {
+ // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error)
+ Submit(*ClientOperation) (interface{}, error)
+}
diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go
new file mode 100644
index 00000000..4ebb2dea
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_request.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+)
+
+// ClientRequestWriterFunc converts a function to a request writer interface
+type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error
+
+// WriteToRequest adds data to the request
+func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error {
+ return fn(req, reg)
+}
+
+// ClientRequestWriter is an interface for things that know how to write to a request
+type ClientRequestWriter interface {
+ WriteToRequest(ClientRequest, strfmt.Registry) error
+}
+
+// ClientRequest is an interface for things that know how to
+// add information to a swagger client request.
+type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters
+ SetHeaderParam(string, ...string) error
+
+ GetHeaderParams() http.Header
+
+ SetQueryParam(string, ...string) error
+
+ SetFormParam(string, ...string) error
+
+ SetPathParam(string, string) error
+
+ GetQueryParams() url.Values
+
+ SetFileParam(string, ...NamedReadCloser) error
+
+ SetBodyParam(interface{}) error
+
+ SetTimeout(time.Duration) error
+
+ GetMethod() string
+
+ GetPath() string
+
+ GetBody() []byte
+
+ GetBodyParam() interface{}
+
+ GetFileParam() map[string][]NamedReadCloser
+}
+
+// NamedReadCloser represents a named ReadCloser interface
+type NamedReadCloser interface {
+ io.ReadCloser
+ Name() string
+}
+
+// NamedReader creates a NamedReadCloser for use as file upload
+func NamedReader(name string, rdr io.Reader) NamedReadCloser {
+ rc, ok := rdr.(io.ReadCloser)
+ if !ok {
+ rc = io.NopCloser(rdr)
+ }
+ return &namedReadCloser{
+ name: name,
+ cr: rc,
+ }
+}
+
+type namedReadCloser struct {
+ name string
+ cr io.ReadCloser
+}
+
+func (n *namedReadCloser) Close() error {
+ return n.cr.Close()
+}
+func (n *namedReadCloser) Read(p []byte) (int, error) {
+ return n.cr.Read(p)
+}
+func (n *namedReadCloser) Name() string {
+ return n.name
+}
+
+type TestClientRequest struct {
+ Headers http.Header
+ Body interface{}
+}
+
+func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error {
+ if t.Headers == nil {
+ t.Headers = make(http.Header)
+ }
+ t.Headers.Set(name, values[0])
+ return nil
+}
+
+func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil }
+
+func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil }
+
+func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil }
+
+func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil }
+
+func (t *TestClientRequest) SetBodyParam(body interface{}) error {
+ t.Body = body
+ return nil
+}
+
+func (t *TestClientRequest) SetTimeout(time.Duration) error {
+ return nil
+}
+
+func (t *TestClientRequest) GetQueryParams() url.Values { return nil }
+
+func (t *TestClientRequest) GetMethod() string { return "" }
+
+func (t *TestClientRequest) GetPath() string { return "" }
+
+func (t *TestClientRequest) GetBody() []byte { return nil }
+
+func (t *TestClientRequest) GetBodyParam() interface{} {
+ return t.Body
+}
+
+func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser {
+ return nil
+}
+
+func (t *TestClientRequest) GetHeaderParams() http.Header {
+ return t.Headers
+}
diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go
new file mode 100644
index 00000000..0d169114
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/client_response.go
@@ -0,0 +1,110 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// A ClientResponse represents a client response
+// This bridges between responses obtained from different transports
+type ClientResponse interface {
+ Code() int
+ Message() string
+ GetHeader(string) string
+ GetHeaders(string) []string
+ Body() io.ReadCloser
+}
+
+// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation
+type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error)
+
+// ReadResponse reads the response
+func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) {
+ return read(resp, consumer)
+}
+
+// A ClientResponseReader is an interface for things that want to read a response.
+// An application of this is to create structs from response values
+type ClientResponseReader interface {
+ ReadResponse(ClientResponse, Consumer) (interface{}, error)
+}
+
+// NewAPIError creates a new API error
+func NewAPIError(opName string, payload interface{}, code int) *APIError {
+ return &APIError{
+ OperationName: opName,
+ Response: payload,
+ Code: code,
+ }
+}
+
+// APIError wraps an error model and captures the status code
+type APIError struct {
+ OperationName string
+ Response interface{}
+ Code int
+}
+
+func (o *APIError) Error() string {
+ var resp []byte
+ if err, ok := o.Response.(error); ok {
+ resp = []byte("'" + err.Error() + "'")
+ } else {
+ resp, _ = json.Marshal(o.Response)
+ }
+ return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp)
+}
+
+func (o *APIError) String() string {
+ return o.Error()
+}
+
+// IsSuccess returns true when the response returns a 2xx status code
+func (o *APIError) IsSuccess() bool {
+ return o.Code/100 == 2
+}
+
+// IsRedirect returns true when the response returns a 3xx status code
+func (o *APIError) IsRedirect() bool {
+ return o.Code/100 == 3
+}
+
+// IsClientError returns true when the response returns a 4xx status code
+func (o *APIError) IsClientError() bool {
+ return o.Code/100 == 4
+}
+
+// IsServerError returns true when the response returns a 5xx status code
+func (o *APIError) IsServerError() bool {
+ return o.Code/100 == 5
+}
+
+// IsCode returns true when the response status code matches the given code
+func (o *APIError) IsCode(code int) bool {
+	return o.Code == code
+}
+
+// A ClientResponseStatus is a common interface implemented by all responses on the generated code
+// You can use this to treat any client response based on status code
+type ClientResponseStatus interface {
+ IsSuccess() bool
+ IsRedirect() bool
+ IsClientError() bool
+ IsServerError() bool
+ IsCode(int) bool
+}
diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go
new file mode 100644
index 00000000..51596924
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/constants.go
@@ -0,0 +1,49 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+const (
+ // HeaderContentType represents a http content-type header; its value is supposed to be a mime type
+ HeaderContentType = "Content-Type"
+
+ // HeaderTransferEncoding represents a http transfer-encoding header.
+ HeaderTransferEncoding = "Transfer-Encoding"
+
+ // HeaderAccept the Accept header
+ HeaderAccept = "Accept"
+ // HeaderAuthorization the Authorization header
+ HeaderAuthorization = "Authorization"
+
+ charsetKey = "charset"
+
+ // DefaultMime the default fallback mime type
+ DefaultMime = "application/octet-stream"
+ // JSONMime the json mime type
+ JSONMime = "application/json"
+ // YAMLMime the yaml mime type
+ YAMLMime = "application/x-yaml"
+ // XMLMime the xml mime type
+ XMLMime = "application/xml"
+ // TextMime the text mime type
+ TextMime = "text/plain"
+ // HTMLMime the html mime type
+ HTMLMime = "text/html"
+ // CSVMime the csv mime type
+ CSVMime = "text/csv"
+ // MultipartFormMime the multipart form mime type
+ MultipartFormMime = "multipart/form-data"
+ // URLencodedFormMime the url encoded form mime type
+ URLencodedFormMime = "application/x-www-form-urlencoded"
+)
diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go
new file mode 100644
index 00000000..c9597bcd
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/csv.go
@@ -0,0 +1,350 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "bytes"
+ "context"
+ "encoding"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+
+ "golang.org/x/sync/errgroup"
+)
+
+// CSVConsumer creates a new CSV consumer.
+//
+// The consumer consumes CSV records from a provided reader into the data passed by reference.
+//
+// CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...).
+// The defaults are those of the standard library's csv.Reader and csv.Writer.
+//
+// Supported output underlying types and interfaces, prioritized in this order:
+// - *csv.Writer
+// - CSVWriter (writer options are ignored)
+// - io.Writer (as raw bytes)
+// - io.ReaderFrom (as raw bytes)
+// - encoding.BinaryUnmarshaler (as raw bytes)
+// - *[][]string (as a collection of records)
+// - *[]byte (as raw bytes)
+// - *string (as raw bytes)
+//
+// The consumer prioritizes situations where buffering the input is not required.
+func CSVConsumer(opts ...CSVOpt) Consumer {
+    o := csvOptsWithDefaults(opts)
+
+    return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+        if reader == nil {
+            return errors.New("CSVConsumer requires a reader")
+        }
+        if data == nil {
+            return errors.New("nil destination for CSVConsumer")
+        }
+
+        csvReader := csv.NewReader(reader)
+        o.applyToReader(csvReader)
+        // when WithCSVClosesStream was set and the input is closeable, close it when done
+        closer := defaultCloser
+        if o.closeStream {
+            if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
+                closer = cl.Close
+            }
+        }
+        defer func() {
+            _ = closer()
+        }()
+
+        switch destination := data.(type) {
+        case *csv.Writer:
+            csvWriter := destination
+            o.applyToWriter(csvWriter)
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case CSVWriter:
+            csvWriter := destination
+            // no writer options available
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case io.Writer:
+            csvWriter := csv.NewWriter(destination)
+            o.applyToWriter(csvWriter)
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case io.ReaderFrom:
+            // buffer the normalized CSV output, then hand it over in one read
+            var buf bytes.Buffer
+            csvWriter := csv.NewWriter(&buf)
+            o.applyToWriter(csvWriter)
+            if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+                return err
+            }
+            _, err := destination.ReadFrom(&buf)
+
+            return err
+
+        case encoding.BinaryUnmarshaler:
+            // buffer the normalized CSV output and unmarshal the raw bytes
+            var buf bytes.Buffer
+            csvWriter := csv.NewWriter(&buf)
+            o.applyToWriter(csvWriter)
+            if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+                return err
+            }
+
+            return destination.UnmarshalBinary(buf.Bytes())
+
+        default:
+            // support *[][]string, *[]byte, *string
+            if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
+                return errors.New("destination must be a pointer")
+            }
+
+            v := reflect.Indirect(reflect.ValueOf(data))
+            t := v.Type()
+
+            switch {
+            case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+                // *[][]string: collect records then copy them into the destination slice
+                csvWriter := &csvRecordsWriter{}
+                // writer options are ignored
+                if err := pipeCSV(csvWriter, csvReader, o); err != nil {
+                    return err
+                }
+
+                // NOTE(review): reflect.Value.Grow requires go1.20 or later — confirm the module's minimum Go version
+                v.Grow(len(csvWriter.records))
+                v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity
+                v.SetLen(len(csvWriter.records))
+                reflect.Copy(v, reflect.ValueOf(csvWriter.records))
+
+                return nil
+
+            case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+                // *[]byte: return the normalized CSV output as raw bytes
+                var buf bytes.Buffer
+                csvWriter := csv.NewWriter(&buf)
+                o.applyToWriter(csvWriter)
+                if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+                    return err
+                }
+                v.SetBytes(buf.Bytes())
+
+                return nil
+
+            case t.Kind() == reflect.String:
+                // *string: return the normalized CSV output as a string
+                var buf bytes.Buffer
+                csvWriter := csv.NewWriter(&buf)
+                o.applyToWriter(csvWriter)
+                if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
+                    return err
+                }
+                v.SetString(buf.String())
+
+                return nil
+
+            default:
+                return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s",
+                    data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface",
+                )
+            }
+        }
+    })
+}
+
+// CSVProducer creates a new CSV producer.
+//
+// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
+//
+// Supported input underlying types and interfaces, prioritized in this order:
+// - *csv.Reader
+// - CSVReader (reader options are ignored)
+// - io.Reader
+// - io.WriterTo
+// - encoding.BinaryMarshaler
+// - [][]string
+// - []byte
+// - string
+//
+// The producer prioritizes situations where buffering the input is not required.
+func CSVProducer(opts ...CSVOpt) Producer {
+    o := csvOptsWithDefaults(opts)
+
+    return ProducerFunc(func(writer io.Writer, data interface{}) error {
+        if writer == nil {
+            return errors.New("CSVProducer requires a writer")
+        }
+        if data == nil {
+            return errors.New("nil data for CSVProducer")
+        }
+
+        csvWriter := csv.NewWriter(writer)
+        o.applyToWriter(csvWriter)
+        // when WithCSVClosesStream was set and the output is closeable, close it when done
+        closer := defaultCloser
+        if o.closeStream {
+            if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
+                closer = cl.Close
+            }
+        }
+        defer func() {
+            _ = closer()
+        }()
+
+        // a closeable data source is always closed, regardless of the closeStream option
+        if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
+            defer rc.Close()
+        }
+
+        switch origin := data.(type) {
+        case *csv.Reader:
+            csvReader := origin
+            o.applyToReader(csvReader)
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case CSVReader:
+            csvReader := origin
+            // no reader options available
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case io.Reader:
+            csvReader := csv.NewReader(origin)
+            o.applyToReader(csvReader)
+
+            return pipeCSV(csvWriter, csvReader, o)
+
+        case io.WriterTo:
+            // async piping of the writes performed by WriteTo
+            r, w := io.Pipe()
+            csvReader := csv.NewReader(r)
+            o.applyToReader(csvReader)
+
+            // the group context is unused: errors propagate through Wait
+            pipe, _ := errgroup.WithContext(context.Background())
+            pipe.Go(func() error {
+                _, err := origin.WriteTo(w)
+                // closing the pipe writer unblocks the reading goroutine with io.EOF
+                _ = w.Close()
+                return err
+            })
+
+            pipe.Go(func() error {
+                defer func() {
+                    _ = r.Close()
+                }()
+
+                return pipeCSV(csvWriter, csvReader, o)
+            })
+
+            return pipe.Wait()
+
+        case encoding.BinaryMarshaler:
+            buf, err := origin.MarshalBinary()
+            if err != nil {
+                return err
+            }
+            rdr := bytes.NewBuffer(buf)
+            csvReader := csv.NewReader(rdr)
+            // NOTE(review): reader options are not applied on this path, unlike the other cases — confirm whether intentional
+
+            return bufferedCSV(csvWriter, csvReader, o)
+
+        default:
+            // support [][]string, []byte, string (or pointers to those)
+            v := reflect.Indirect(reflect.ValueOf(data))
+            t := v.Type()
+
+            switch {
+            case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
+                // [][]string: replay the records through the internal records writer
+                csvReader := &csvRecordsWriter{
+                    records: make([][]string, v.Len()),
+                }
+                reflect.Copy(reflect.ValueOf(csvReader.records), v)
+
+                return pipeCSV(csvWriter, csvReader, o)
+
+            case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
+                // []byte: parse the raw bytes as CSV
+                buf := bytes.NewBuffer(v.Bytes())
+                csvReader := csv.NewReader(buf)
+                o.applyToReader(csvReader)
+
+                return bufferedCSV(csvWriter, csvReader, o)
+
+            case t.Kind() == reflect.String:
+                // string: parse the raw string as CSV
+                buf := bytes.NewBufferString(v.String())
+                csvReader := csv.NewReader(buf)
+                o.applyToReader(csvReader)
+
+                return bufferedCSV(csvWriter, csvReader, o)
+
+            default:
+                return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s",
+                    data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface",
+                )
+            }
+        }
+    })
+}
+
+// pipeCSV streams CSV records from csvReader to csvWriter one record at a
+// time, first discarding opts.skippedLines leading records. Reaching EOF
+// while skipping is not an error: there is simply nothing left to copy.
+func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error {
+    for skip := opts.skippedLines; skip > 0; skip-- {
+        if _, err := csvReader.Read(); err != nil {
+            if errors.Is(err, io.EOF) {
+                return nil
+            }
+            return err
+        }
+    }
+
+    for {
+        record, err := csvReader.Read()
+        if errors.Is(err, io.EOF) {
+            break
+        }
+        if err != nil {
+            return err
+        }
+        if writeErr := csvWriter.Write(record); writeErr != nil {
+            return writeErr
+        }
+    }
+
+    // flush buffered output and surface any deferred write error
+    csvWriter.Flush()
+
+    return csvWriter.Error()
+}
+
+// bufferedCSV copies CSV records from csvReader to csvWriter by reading every
+// record into memory first, then writing them all at once. The first
+// opts.skippedLines records are discarded; hitting EOF while skipping is not
+// an error.
+func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error {
+    for skipped := 0; skipped < opts.skippedLines; skipped++ {
+        if _, err := csvReader.Read(); err != nil {
+            if errors.Is(err, io.EOF) {
+                return nil
+            }
+            return err
+        }
+    }
+
+    records, err := csvReader.ReadAll()
+    if err != nil {
+        return err
+    }
+
+    return csvWriter.WriteAll(records)
+}
diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go
new file mode 100644
index 00000000..c16464c5
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/csv_options.go
@@ -0,0 +1,121 @@
+package runtime
+
+import (
+ "encoding/csv"
+ "io"
+)
+
+// CSVOpt alters the behavior of the CSV consumer or producer
+// (a functional option for CSVConsumer and CSVProducer).
+type CSVOpt func(*csvOpts)
+
+// csvOpts collects the settings resolved from CSVOpt options.
+type csvOpts struct {
+    csvReader    csv.Reader // template: non-default fields are copied onto the actual reader (see applyToReader)
+    csvWriter    csv.Writer // template: non-default fields are copied onto the actual writer (see applyToWriter)
+    skippedLines int        // number of leading records to skip (e.g. headers)
+    closeStream  bool       // close the underlying stream when done, when it implements io.Closer
+}
+
+// WithCSVReaderOpts specifies the options to csv.Reader
+// when reading CSV.
+func WithCSVReaderOpts(reader csv.Reader) CSVOpt {
+    return func(o *csvOpts) {
+        o.csvReader = reader
+    }
+}
+
+// WithCSVWriterOpts specifies the options to csv.Writer
+// when writing CSV.
+func WithCSVWriterOpts(writer csv.Writer) CSVOpt {
+    return func(o *csvOpts) {
+        o.csvWriter = writer
+    }
+}
+
+// WithCSVSkipLines will skip header lines.
+func WithCSVSkipLines(skipped int) CSVOpt {
+    return func(o *csvOpts) {
+        o.skippedLines = skipped
+    }
+}
+
+// WithCSVClosesStream requests that the underlying stream (the consumer's
+// reader or the producer's writer) be closed after processing, whenever it
+// implements io.Closer.
+func WithCSVClosesStream() CSVOpt {
+    return func(o *csvOpts) {
+        o.closeStream = true
+    }
+}
+
+// applyToReader copies the configured reader options onto in.
+// Fields left at their zero value (Comma, Comment, FieldsPerRecord) keep the
+// csv.Reader defaults; boolean fields are copied unconditionally.
+func (o csvOpts) applyToReader(in *csv.Reader) {
+    if o.csvReader.Comma != 0 {
+        in.Comma = o.csvReader.Comma
+    }
+    if o.csvReader.Comment != 0 {
+        in.Comment = o.csvReader.Comment
+    }
+    if o.csvReader.FieldsPerRecord != 0 {
+        in.FieldsPerRecord = o.csvReader.FieldsPerRecord
+    }
+
+    in.LazyQuotes = o.csvReader.LazyQuotes
+    in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace
+    in.ReuseRecord = o.csvReader.ReuseRecord
+}
+
+// applyToWriter copies the configured writer options onto in.
+// A zero Comma keeps the csv.Writer default separator; UseCRLF is copied unconditionally.
+func (o csvOpts) applyToWriter(in *csv.Writer) {
+    if o.csvWriter.Comma != 0 {
+        in.Comma = o.csvWriter.Comma
+    }
+    in.UseCRLF = o.csvWriter.UseCRLF
+}
+
+// csvOptsWithDefaults resolves a list of CSVOpt options into a csvOpts value,
+// starting from the zero value (i.e. the standard library csv defaults).
+func csvOptsWithDefaults(opts []CSVOpt) csvOpts {
+    var resolved csvOpts
+    for _, opt := range opts {
+        opt(&resolved)
+    }
+
+    return resolved
+}
+
+// CSVWriter is the subset of csv.Writer this package needs to write CSV records.
+type CSVWriter interface {
+    Write([]string) error
+    Flush()
+    Error() error
+}
+
+// CSVReader is the subset of csv.Reader this package needs to read CSV records.
+type CSVReader interface {
+    Read() ([]string, error)
+}
+
+// interface guards: csvRecordsWriter works on both sides of a CSV pipe
+var (
+    _ CSVWriter = &csvRecordsWriter{}
+    _ CSVReader = &csvRecordsWriter{}
+)
+
+// csvRecordsWriter is an internal container to move CSV records back and
+// forth: Write appends records, Read replays them sequentially, and the
+// CSVWriter methods Flush/Error are no-ops.
+type csvRecordsWriter struct {
+    i       int        // read cursor into records
+    records [][]string // accumulated records
+}
+
+// Write appends one record to the in-memory collection. It never fails.
+func (w *csvRecordsWriter) Write(record []string) error {
+    w.records = append(w.records, record)
+
+    return nil
+}
+
+// Read returns the next stored record, or io.EOF once all records have been replayed.
+func (w *csvRecordsWriter) Read() ([]string, error) {
+    if w.i >= len(w.records) {
+        return nil, io.EOF
+    }
+    record := w.records[w.i]
+    w.i++
+
+    return record, nil
+}
+
+// Flush is a no-op: records are already in memory.
+func (w *csvRecordsWriter) Flush() {}
+
+// Error always reports success: Write cannot fail.
+func (w *csvRecordsWriter) Error() error {
+    return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go
new file mode 100644
index 00000000..0d390cfd
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/discard.go
@@ -0,0 +1,9 @@
+package runtime
+
+import "io"
+
+// DiscardConsumer does absolutely nothing, it's a black hole: the reader is
+// not read and the destination is left untouched.
+var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil })
+
+// DiscardProducer does absolutely nothing, it's a black hole: nothing is
+// written to the output.
+var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil })
diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go
new file mode 100644
index 00000000..397d8a45
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/file.go
@@ -0,0 +1,19 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import "github.com/go-openapi/swag"
+
+// File is an alias to swag.File, used to handle file parameters
+// (e.g. multipart form data uploads).
+type File = swag.File
diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go
new file mode 100644
index 00000000..4d111db4
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/headers.go
@@ -0,0 +1,45 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "mime"
+ "net/http"
+
+ "github.com/go-openapi/errors"
+)
+
+// ContentType parses a Content-Type header and returns the media type and
+// the value of its "charset" parameter (empty when absent).
+//
+// When the header is missing, the default MIME type DefaultMime
+// ("application/octet-stream") is assumed. A malformed header yields a parse
+// error carrying the original header value.
+func ContentType(headers http.Header) (string, string, error) {
+    ct := headers.Get(HeaderContentType)
+    orig := ct
+    if ct == "" {
+        ct = DefaultMime
+    }
+    // NOTE: no empty-string check is needed past this point: DefaultMime is a
+    // non-empty constant, so ct is always non-empty here.
+
+    mt, opts, err := mime.ParseMediaType(ct)
+    if err != nil {
+        return "", "", errors.NewParseError(HeaderContentType, "header", orig, err)
+    }
+
+    if cs, ok := opts[charsetKey]; ok {
+        return mt, cs, nil
+    }
+
+    return mt, "", nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go
new file mode 100644
index 00000000..e3341286
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/interfaces.go
@@ -0,0 +1,112 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/go-openapi/strfmt"
+)
+
+// OperationHandlerFunc an adapter for a function to the OperationHandler interface
+type OperationHandlerFunc func(interface{}) (interface{}, error)
+
+// Handle implements the operation handler interface
+func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) {
+    return s(data)
+}
+
+// OperationHandler a handler for a swagger operation:
+// it takes the bound request parameters and returns the operation result or an error.
+type OperationHandler interface {
+    Handle(interface{}) (interface{}, error)
+}
+
+// ConsumerFunc represents a function that can be used as a consumer
+type ConsumerFunc func(io.Reader, interface{}) error
+
+// Consume consumes the reader into the data parameter
+func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error {
+    return fn(reader, data)
+}
+
+// Consumer implementations know how to bind the values on the provided interface to
+// data provided by the request body
+type Consumer interface {
+    // Consume performs the binding of request values
+    Consume(io.Reader, interface{}) error
+}
+
+// ProducerFunc represents a function that can be used as a producer
+type ProducerFunc func(io.Writer, interface{}) error
+
+// Produce produces the response for the provided data
+func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error {
+    return f(writer, data)
+}
+
+// Producer implementations know how to turn the provided interface into a valid
+// HTTP response
+type Producer interface {
+    // Produce writes to the http response
+    Produce(io.Writer, interface{}) error
+}
+
+// AuthenticatorFunc turns a function into an authenticator.
+// The returned values are: whether the strategy applies, the authenticated principal, and an error.
+type AuthenticatorFunc func(interface{}) (bool, interface{}, error)
+
+// Authenticate authenticates the request with the provided data
+func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) {
+    return f(params)
+}
+
+// Authenticator represents an authentication strategy
+// implementations of Authenticator know how to authenticate the
+// request data and translate that into a valid principal object or an error
+type Authenticator interface {
+    Authenticate(interface{}) (bool, interface{}, error)
+}
+
+// AuthorizerFunc turns a function into an authorizer
+type AuthorizerFunc func(*http.Request, interface{}) error
+
+// Authorize authorizes the processing of the request for the principal
+func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error {
+    return f(r, principal)
+}
+
+// Authorizer represents an authorization strategy
+// implementations of Authorizer know how to authorize the principal object
+// using the request data and returns error if unauthorized
+type Authorizer interface {
+    Authorize(*http.Request, interface{}) error
+}
+
+// Validatable types implementing this interface allow customizing their validation
+// this will be used instead of the reflective validation based on the spec document.
+// the implementations are assumed to have been generated by the swagger tool so they should
+// contain all the validations obtained from the spec
+type Validatable interface {
+    Validate(strfmt.Registry) error
+}
+
+// ContextValidatable types implementing this interface allow customizing their validation
+// this will be used instead of the reflective validation based on the spec document.
+// the implementations are assumed to have been generated by the swagger tool so they should
+// contain all the context validations obtained from the spec
+type ContextValidatable interface {
+    ContextValidate(context.Context, strfmt.Registry) error
+}
diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go
new file mode 100644
index 00000000..5a690559
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/json.go
@@ -0,0 +1,38 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// JSONConsumer creates a new JSON consumer.
+//
+// Numbers are decoded with UseNumber, so untyped numeric values are kept as
+// json.Number rather than being converted to float64.
+func JSONConsumer() Consumer {
+    return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+        dec := json.NewDecoder(reader)
+        dec.UseNumber() // preserve number formats
+        return dec.Decode(data)
+    })
+}
+
+// JSONProducer creates a new JSON producer.
+//
+// HTML escaping is disabled, so characters such as '<', '>' and '&' are
+// written verbatim. Note that json.Encoder.Encode appends a trailing newline.
+func JSONProducer() Producer {
+    return ProducerFunc(func(writer io.Writer, data interface{}) error {
+        enc := json.NewEncoder(writer)
+        enc.SetEscapeHTML(false)
+        return enc.Encode(data)
+    })
+}
diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go
new file mode 100644
index 00000000..6f4debcc
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/logger/logger.go
@@ -0,0 +1,20 @@
+package logger
+
+import "os"
+
+// Logger is the interface used by the runtime to emit regular (Printf) and
+// debug (Debugf) messages.
+type Logger interface {
+    Printf(format string, args ...interface{})
+    Debugf(format string, args ...interface{})
+}
+
+// DebugEnabled reports whether debug logging is requested through the
+// SWAGGER_DEBUG or DEBUG environment variables. Any value other than the
+// empty string, "false" and "0" enables debugging; SWAGGER_DEBUG is
+// consulted first.
+func DebugEnabled() bool {
+    for _, key := range []string{"SWAGGER_DEBUG", "DEBUG"} {
+        if v := os.Getenv(key); v != "" && v != "false" && v != "0" {
+            return true
+        }
+    }
+
+    return false
+}
diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go
new file mode 100644
index 00000000..30035a77
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/logger/standard.go
@@ -0,0 +1,24 @@
+package logger
+
+import (
+ "fmt"
+ "os"
+)
+
+// interface guard
+var _ Logger = StandardLogger{}
+
+// StandardLogger is a basic Logger that writes every message to os.Stderr,
+// ensuring each message ends with a newline.
+type StandardLogger struct{}
+
+// Printf writes a regular message to os.Stderr.
+func (StandardLogger) Printf(format string, args ...interface{}) {
+    fmt.Fprintf(os.Stderr, withTrailingNewline(format), args...)
+}
+
+// Debugf writes a debug message to os.Stderr.
+func (StandardLogger) Debugf(format string, args ...interface{}) {
+    fmt.Fprintf(os.Stderr, withTrailingNewline(format), args...)
+}
+
+// withTrailingNewline appends '\n' to format unless it already ends with one.
+func withTrailingNewline(format string) string {
+    if len(format) == 0 || format[len(format)-1] != '\n' {
+        return format + "\n"
+    }
+
+    return format
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go
new file mode 100644
index 00000000..44cecf11
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/context.go
@@ -0,0 +1,722 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ stdContext "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/loads"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/logger"
+ "github.com/go-openapi/runtime/middleware/untyped"
+ "github.com/go-openapi/runtime/security"
+)
+
+// Debug when true turns on verbose logging
+var Debug = logger.DebugEnabled()
+
+// Logger is the standard library logger used for printing debug messages
+var Logger logger.Logger = logger.StandardLogger{}
+
+// debugLogfFunc returns a debug logging function bound to lg, or to the
+// package-level Logger when lg is nil. When debugging is disabled it returns
+// a no-op function, so call sites may log unconditionally.
+//
+// NOTE(review): this re-evaluates logger.DebugEnabled() rather than reading
+// the package-level Debug variable, so mutating Debug alone has no effect
+// here — confirm whether that is intentional.
+func debugLogfFunc(lg logger.Logger) func(string, ...any) {
+    if logger.DebugEnabled() {
+        if lg == nil {
+            return Logger.Debugf
+        }
+
+        return lg.Debugf
+    }
+
+    // muted logger
+    return func(_ string, _ ...any) {}
+}
+
+// A Builder can create middlewares: it decorates an http.Handler with another.
+type Builder func(http.Handler) http.Handler
+
+// PassthroughBuilder returns the handler unchanged, aka the builder identity function
+func PassthroughBuilder(handler http.Handler) http.Handler { return handler }
+
+// RequestBinder is an interface for types to implement
+// when they want to be able to bind from a request
+type RequestBinder interface {
+    BindRequest(*http.Request, *MatchedRoute) error
+}
+
+// Responder is an interface for types to implement
+// when they want to be considered for writing HTTP responses
+type Responder interface {
+    WriteResponse(http.ResponseWriter, runtime.Producer)
+}
+
+// ResponderFunc wraps a func as a Responder interface
+type ResponderFunc func(http.ResponseWriter, runtime.Producer)
+
+// WriteResponse writes to the response
+func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) {
+    fn(rw, pr)
+}
+
+// Context is a type safe wrapper around an untyped request context
+// used throughout to store request context with the standard context attached
+// to the http.Request
+type Context struct {
+    spec      *loads.Document      // the loaded swagger spec document (may be nil)
+    analyzer  *analysis.Spec       // analyzed spec, kept consistent with spec
+    api       RoutableAPI          // the API implementation requests are dispatched to
+    router    Router               // the route matcher
+    debugLogf func(string, ...any) // a logging function to debug context and all components using it
+}
+
+// routableUntypedAPI adapts an untyped.API to the RoutableAPI interface,
+// holding the per-method/per-path handlers pre-built by newRoutableUntypedAPI.
+type routableUntypedAPI struct {
+    api             *untyped.API
+    hlock           *sync.Mutex                        // guards access to handlers
+    handlers        map[string]map[string]http.Handler // uppercase HTTP method -> path -> handler
+    defaultConsumes string
+    defaultProduces string
+}
+
+// newRoutableUntypedAPI wraps an untyped API and its spec document into a
+// routableUntypedAPI, pre-building an http.Handler for every operation found
+// in the analyzed spec. Operations with security requirements are wrapped
+// with the security middleware. Returns nil when either spec or api is nil.
+func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI {
+    var handlers map[string]map[string]http.Handler
+    if spec == nil || api == nil {
+        return nil
+    }
+    analyzer := analysis.New(spec.Spec())
+    for method, hls := range analyzer.Operations() {
+        um := strings.ToUpper(method)
+        for path, op := range hls {
+            schemes := analyzer.SecurityRequirementsFor(op)
+
+            if oh, ok := api.OperationHandlerFor(method, path); ok {
+                // lazily allocate the handler maps on the first registered operation
+                if handlers == nil {
+                    handlers = make(map[string]map[string]http.Handler)
+                }
+                if b, ok := handlers[um]; !ok || b == nil {
+                    handlers[um] = make(map[string]http.Handler)
+                }
+
+                var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+                    // lookup route info in the context
+                    route, rCtx, _ := context.RouteInfo(r)
+                    if rCtx != nil {
+                        r = rCtx
+                    }
+
+                    // bind and validate the request using reflection
+                    var bound interface{}
+                    var validation error
+                    bound, r, validation = context.BindAndValidate(r, route)
+                    if validation != nil {
+                        context.Respond(w, r, route.Produces, route, validation)
+                        return
+                    }
+
+                    // actually handle the request
+                    result, err := oh.Handle(bound)
+                    if err != nil {
+                        // respond with failure
+                        context.Respond(w, r, route.Produces, route, err)
+                        return
+                    }
+
+                    // respond with success
+                    context.Respond(w, r, route.Produces, route, result)
+                })
+
+                // secured operations are wrapped with the security middleware
+                if len(schemes) > 0 {
+                    handler = newSecureAPI(context, handler)
+                }
+                handlers[um][path] = handler
+            }
+        }
+    }
+
+    return &routableUntypedAPI{
+        api:             api,
+        hlock:           new(sync.Mutex),
+        handlers:        handlers,
+        defaultProduces: api.DefaultProduces,
+        defaultConsumes: api.DefaultConsumes,
+    }
+}
+
+// HandlerFor returns the pre-built handler registered for method and path,
+// and whether one exists. Access to the handlers map is serialized by hlock.
+func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) {
+    r.hlock.Lock()
+    paths, ok := r.handlers[strings.ToUpper(method)]
+    if !ok {
+        r.hlock.Unlock()
+        return nil, false
+    }
+    handler, ok := paths[path]
+    r.hlock.Unlock()
+    return handler, ok
+}
+
+// ServeErrorFor returns the error renderer of the wrapped API (the operation ID argument is ignored).
+func (r *routableUntypedAPI) ServeErrorFor(_ string) func(http.ResponseWriter, *http.Request, error) {
+    return r.api.ServeError
+}
+
+// ConsumersFor returns the consumers registered for the given media types.
+func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
+    return r.api.ConsumersFor(mediaTypes)
+}
+
+// ProducersFor returns the producers registered for the given media types.
+func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer {
+    return r.api.ProducersFor(mediaTypes)
+}
+
+// AuthenticatorsFor returns the authenticators matching the given security schemes.
+func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator {
+    return r.api.AuthenticatorsFor(schemes)
+}
+
+// Authorizer returns the authorizer of the wrapped API.
+func (r *routableUntypedAPI) Authorizer() runtime.Authorizer {
+    return r.api.Authorizer()
+}
+
+// Formats returns the string format registry of the wrapped API.
+func (r *routableUntypedAPI) Formats() strfmt.Registry {
+    return r.api.Formats()
+}
+
+// DefaultProduces returns the default produces media type captured from the API.
+func (r *routableUntypedAPI) DefaultProduces() string {
+    return r.defaultProduces
+}
+
+// DefaultConsumes returns the default consumes media type captured from the API.
+func (r *routableUntypedAPI) DefaultConsumes() string {
+    return r.defaultConsumes
+}
+
+// NewRoutableContext creates a new context for a routable API.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
+func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context {
+    var an *analysis.Spec
+    if spec != nil {
+        an = analysis.New(spec.Spec())
+    }
+
+    return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes)
+}
+
+// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
+//
+// Panics (HTTP 500 error) when exactly one of spec and an is nil: the two must be consistent.
+func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context {
+    // Either there are no spec doc and analysis, or both of them.
+    if !((spec == nil && an == nil) || (spec != nil && an != nil)) {
+        panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them"))
+    }
+
+    return &Context{
+        spec:      spec,
+        api:       routableAPI,
+        analyzer:  an,
+        router:    routes,
+        debugLogf: debugLogfFunc(nil),
+    }
+}
+
+// NewContext creates a new context wrapper.
+//
+// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
+func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context {
+    var an *analysis.Spec
+    if spec != nil {
+        an = analysis.New(spec.Spec())
+    }
+    ctx := &Context{
+        spec:      spec,
+        analyzer:  an,
+        router:    routes,
+        debugLogf: debugLogfFunc(nil),
+    }
+    // the untyped API wrapper needs a back-reference to this context
+    ctx.api = newRoutableUntypedAPI(spec, api, ctx)
+
+    return ctx
+}
+
+// Serve serves the specified spec with the specified api registrations as a http.Handler
+func Serve(spec *loads.Document, api *untyped.API) http.Handler {
+    return ServeWithBuilder(spec, api, PassthroughBuilder)
+}
+
+// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated
+// by the Builder
+func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler {
+    context := NewContext(spec, api, nil)
+    return context.APIHandler(builder)
+}
+
+// contextKey is a private key type for values this package stores in a
+// request's context, preventing collisions with keys from other packages.
+type contextKey int8
+
+const (
+    _ contextKey = iota // skip the zero value
+    ctxContentType      // *contentTypeValue: cached parsed Content-Type (see Context.ContentType)
+    ctxResponseFormat
+    ctxMatchedRoute // *MatchedRoute for the request (see MatchedRouteFrom)
+    ctxBoundParams
+    ctxSecurityPrincipal // authenticated principal (see SecurityPrincipalFrom)
+    ctxSecurityScopes    // []string of granted scopes (see SecurityScopesFrom)
+)
+
+// MatchedRouteFrom returns the *MatchedRoute stored in the request context,
+// or nil when none was stored (or it has an unexpected type).
+func MatchedRouteFrom(req *http.Request) *MatchedRoute {
+    if res, ok := req.Context().Value(ctxMatchedRoute).(*MatchedRoute); ok {
+        return res
+    }
+
+    return nil
+}
+
+// SecurityPrincipalFrom request context value: the authenticated principal,
+// or nil when the request was not authenticated.
+func SecurityPrincipalFrom(req *http.Request) interface{} {
+    return req.Context().Value(ctxSecurityPrincipal)
+}
+
+// SecurityScopesFrom request context value: the granted security scopes,
+// or nil when none were stored.
+func SecurityScopesFrom(req *http.Request) []string {
+    rs := req.Context().Value(ctxSecurityScopes)
+    if res, ok := rs.([]string); ok {
+        return res
+    }
+    return nil
+}
+
+// contentTypeValue is the parsed Content-Type cached in the request context
+// under the ctxContentType key (see Context.ContentType).
+type contentTypeValue struct {
+    MediaType string
+    Charset   string
+}
+
+// BasePath returns the base path for this API.
+//
+// NOTE(review): assumes c.spec is non-nil — a Context built without a spec
+// would panic here; confirm callers guarantee a spec.
+func (c *Context) BasePath() string {
+    return c.spec.BasePath()
+}
+
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (c *Context) SetLogger(lg logger.Logger) {
+    c.debugLogf = debugLogfFunc(lg)
+}
+
+// RequiredProduces returns the accepted content types for responses
+func (c *Context) RequiredProduces() []string {
+    return c.analyzer.RequiredProduces()
+}
+
+// BindValidRequest binds a params object to a request but only when the request is valid.
+// If the request is not valid an error will be returned.
+//
+// Validation covers the request Content-Type (against the route's consumers)
+// and the negotiated response format (against the route's producers); all
+// validation failures are accumulated into a composite validation error.
+func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error {
+    var res []error
+    var requestContentType string
+
+    // check and validate content type, select consumer
+    if runtime.HasBody(request) {
+        ct, _, err := runtime.ContentType(request.Header)
+        if err != nil {
+            res = append(res, err)
+        } else {
+            c.debugLogf("validating content type for %q against [%s]", ct, strings.Join(route.Consumes, ", "))
+            if err := validateContentType(route.Consumes, ct); err != nil {
+                res = append(res, err)
+            }
+            if len(res) == 0 {
+                cons, ok := route.Consumers[ct]
+                if !ok {
+                    // internal error (500): the route accepts this media type but no consumer was registered
+                    res = append(res, errors.New(500, "no consumer registered for %s", ct))
+                } else {
+                    route.Consumer = cons
+                    requestContentType = ct
+                }
+            }
+        }
+    }
+
+    // check and validate the response format
+    if len(res) == 0 {
+        // if the route does not provide Produces and a default contentType could not be identified
+        // based on a body, typical for GET and DELETE requests, then default the contentType to "*/*" (any).
+        if len(route.Produces) == 0 && requestContentType == "" {
+            requestContentType = "*/*"
+        }
+
+        if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" {
+            res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces))
+        }
+    }
+
+    // now bind the request with the provided binder
+    // it's assumed the binder will also validate the request and return an error if the
+    // request is invalid
+    if binder != nil && len(res) == 0 {
+        if err := binder.BindRequest(request, route); err != nil {
+            return err
+        }
+    }
+
+    if len(res) > 0 {
+        return errors.CompositeValidationError(res...)
+    }
+    return nil
+}
+
+// ContentType gets the parsed value of a content type
+// Returns the media type, its charset and a shallow copy of the request
+// when its context doesn't contain the content type value, otherwise it returns
+// the same request
+// Returns the error that runtime.ContentType may return.
+func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) {
+ var rCtx = request.Context()
+
+ if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok {
+ return v.MediaType, v.Charset, request, nil
+ }
+
+ mt, cs, err := runtime.ContentType(request.Header)
+ if err != nil {
+ return "", "", nil, err
+ }
+ rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs})
+ return mt, cs, request.WithContext(rCtx), nil
+}
+
+// LookupRoute looks a route up and returns true when it is found
+func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) {
+ if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok {
+ return route, ok
+ }
+ return nil, false
+}
+
+// RouteInfo tries to match a route for this request
+// Returns the matched route, a shallow copy of the request if its context
+// contains the matched route, otherwise the same request, and a bool to
+// indicate whether the request matches one of the routes; if it doesn't,
+// then it returns false and nil for the other two return values
+func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) {
+ var rCtx = request.Context()
+
+ if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok {
+ return v, request, ok
+ }
+
+ if route, ok := c.LookupRoute(request); ok {
+ rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route)
+ return route, request.WithContext(rCtx), ok
+ }
+
+ return nil, nil, false
+}
+
+// ResponseFormat negotiates the response content type
+// Returns the response format and a shallow copy of the request if its context
+// doesn't contain the response format, otherwise the same request
+func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) {
+ var rCtx = r.Context()
+
+ if v, ok := rCtx.Value(ctxResponseFormat).(string); ok {
+ c.debugLogf("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
+ return v, r
+ }
+
+ format := NegotiateContentType(r, offers, "")
+ if format != "" {
+ c.debugLogf("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
+ r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format))
+ }
+ c.debugLogf("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
+ return format, r
+}
+
+// AllowedMethods gets the allowed methods for the path of this request
+func (c *Context) AllowedMethods(request *http.Request) []string {
+ return c.router.OtherMethods(request.Method, request.URL.EscapedPath())
+}
+
+// ResetAuth removes the current principal from the request context
+func (c *Context) ResetAuth(request *http.Request) *http.Request {
+ rctx := request.Context()
+ rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil)
+ rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil)
+ return request.WithContext(rctx)
+}
+
+// Authorize authorizes the request
+// Returns the principal object and a shallow copy of the request when its
+// context doesn't contain the principal, otherwise the same request or an error
+// (the last) if one of the authenticators returns one or an Unauthenticated error
+func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) {
+ if route == nil || !route.HasAuth() {
+ return nil, nil, nil
+ }
+
+ var rCtx = request.Context()
+ if v := rCtx.Value(ctxSecurityPrincipal); v != nil {
+ return v, request, nil
+ }
+
+ applies, usr, err := route.Authenticators.Authenticate(request, route)
+ if !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil {
+ if err != nil {
+ return nil, nil, err
+ }
+ return nil, nil, errors.Unauthenticated("invalid credentials")
+ }
+ if route.Authorizer != nil {
+ if err := route.Authorizer.Authorize(request, usr); err != nil {
+ if _, ok := err.(errors.Error); ok {
+ return nil, nil, err
+ }
+
+ return nil, nil, errors.New(http.StatusForbidden, err.Error())
+ }
+ }
+
+ rCtx = request.Context()
+
+ rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr)
+ rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes())
+ return usr, request.WithContext(rCtx), nil
+}
+
+// BindAndValidate binds and validates the request
+// Returns the validation map and a shallow copy of the request when its context
+// doesn't contain the validation, otherwise it returns the same request or an
+// CompositeValidationError error
+func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) {
+ var rCtx = request.Context()
+
+ if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
+ c.debugLogf("got cached validation (valid: %t)", len(v.result) == 0)
+ if len(v.result) > 0 {
+ return v.bound, request, errors.CompositeValidationError(v.result...)
+ }
+ return v.bound, request, nil
+ }
+ result := validateRequest(c, request, matched)
+ rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result)
+ request = request.WithContext(rCtx)
+ if len(result.result) > 0 {
+ return result.bound, request, errors.CompositeValidationError(result.result...)
+ }
+ c.debugLogf("no validation errors found")
+ return result.bound, request, nil
+}
+
+// NotFound the default not found responder for when no route has been matched yet
+func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
+ c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found"))
+}
+
+// Respond renders the response after doing some content negotiation
+func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {
+ c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
+ offers := []string{}
+ for _, mt := range produces {
+ if mt != c.api.DefaultProduces() {
+ offers = append(offers, mt)
+ }
+ }
+ // the default producer is last so more specific producers take precedence
+ offers = append(offers, c.api.DefaultProduces())
+ c.debugLogf("offers: %v", offers)
+
+ var format string
+ format, r = c.ResponseFormat(r, offers)
+ rw.Header().Set(runtime.HeaderContentType, format)
+
+ if resp, ok := data.(Responder); ok {
+ producers := route.Producers
+ // producers contains keys with normalized format, if a format has MIME type parameter such as `text/plain; charset=utf-8`
+ // then you must provide `text/plain` to get the correct producer. HOWEVER, format here is not normalized.
+ prod, ok := producers[normalizeOffer(format)]
+ if !ok {
+ prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
+ pr, ok := prods[c.api.DefaultProduces()]
+ if !ok {
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
+ }
+ prod = pr
+ }
+ resp.WriteResponse(rw, prod)
+ return
+ }
+
+ if err, ok := data.(error); ok {
+ if format == "" {
+ rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime)
+ }
+
+ if realm := security.FailedBasicAuth(r); realm != "" {
+ rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm))
+ }
+
+ if route == nil || route.Operation == nil {
+ c.api.ServeErrorFor("")(rw, r, err)
+ return
+ }
+ c.api.ServeErrorFor(route.Operation.ID)(rw, r, err)
+ return
+ }
+
+ if route == nil || route.Operation == nil {
+ rw.WriteHeader(http.StatusOK)
+ if r.Method == http.MethodHead {
+ return
+ }
+ producers := c.api.ProducersFor(normalizeOffers(offers))
+ prod, ok := producers[format]
+ if !ok {
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
+ }
+ if err := prod.Produce(rw, data); err != nil {
+ panic(err) // let the recovery middleware deal with this
+ }
+ return
+ }
+
+ if _, code, ok := route.Operation.SuccessResponse(); ok {
+ rw.WriteHeader(code)
+ if code == http.StatusNoContent || r.Method == http.MethodHead {
+ return
+ }
+
+ producers := route.Producers
+ prod, ok := producers[format]
+ if !ok {
+ if !ok {
+ prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
+ pr, ok := prods[c.api.DefaultProduces()]
+ if !ok {
+ panic(errors.New(http.StatusInternalServerError, cantFindProducer(format)))
+ }
+ prod = pr
+ }
+ }
+ if err := prod.Produce(rw, data); err != nil {
+ panic(err) // let the recovery middleware deal with this
+ }
+ return
+ }
+
+ c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
+}
+
+// APIHandlerSwaggerUI returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
+ }
+
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var swaggerUIOpts SwaggerUIOpts
+ fromCommonToAnyOptions(uiOpts, &swaggerUIOpts)
+
+ return Spec(specPath, c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+// APIHandlerRapiDoc returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
+ }
+
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var rapidocUIOpts RapiDocOpts
+ fromCommonToAnyOptions(uiOpts, &rapidocUIOpts)
+
+ return Spec(specPath, c.spec.Raw(), RapiDoc(rapidocUIOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+// APIHandler returns a handler to serve the API.
+//
+// This handler includes a swagger spec, router and the contract defined in the swagger spec.
+//
+// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json
+// (these can be modified with uiOptions).
+func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
+ }
+
+ specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
+ var redocOpts RedocOpts
+ fromCommonToAnyOptions(uiOpts, &redocOpts)
+
+ return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...)
+}
+
+func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []SpecOption) {
+ var title string
+ sp := c.spec.Spec()
+ if sp != nil && sp.Info != nil && sp.Info.Title != "" {
+ title = sp.Info.Title
+ }
+
+ // default options (may be overridden)
+ optsForContext := []UIOption{
+ WithUIBasePath(c.BasePath()),
+ WithUITitle(title),
+ }
+ optsForContext = append(optsForContext, opts...)
+ uiOpts := uiOptionsWithDefaults(optsForContext)
+
+ // If spec URL is provided, there is a non-default path to serve the spec.
+ // This makes sure that the UI middleware is aligned with the Spec middleware.
+ u, _ := url.Parse(uiOpts.SpecURL)
+ var specPath string
+ if u != nil {
+ specPath = u.Path
+ }
+
+ pth, doc := path.Split(specPath)
+ if pth == "." {
+ pth = ""
+ }
+
+ return pth, uiOpts, []SpecOption{WithSpecDocument(doc)}
+}
+
+// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec
+func (c *Context) RoutesHandler(builder Builder) http.Handler {
+ b := builder
+ if b == nil {
+ b = PassthroughBuilder
+ }
+ return NewRouter(c, b(NewOperationExecutor(c)))
+}
+
+func cantFindProducer(format string) string {
+ return "can't find a producer for " + format
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE
new file mode 100644
index 00000000..e65039ad
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Naoya Inada
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
new file mode 100644
index 00000000..30109e17
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md
@@ -0,0 +1,180 @@
+# Denco [![Build Status](https://travis-ci.org/naoina/denco.svg?branch=master)](https://travis-ci.org/naoina/denco)
+
+The fast and flexible HTTP request router for [Go](http://golang.org).
+
+Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter).
+However, Denco is optimized and some features added.
+
+## Features
+
+* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark))
+* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`)
+* Small (but enough) URL router API
+* HTTP request multiplexer like `http.ServeMux`
+
+## Installation
+
+ go get -u github.com/go-openapi/runtime/middleware/denco
+
+## Using as HTTP request multiplexer
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/go-openapi/runtime/middleware/denco"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, params denco.Params) {
+ fmt.Fprintf(w, "Welcome to Denco!\n")
+}
+
+func User(w http.ResponseWriter, r *http.Request, params denco.Params) {
+ fmt.Fprintf(w, "Hello %s!\n", params.Get("name"))
+}
+
+func main() {
+ mux := denco.NewMux()
+ handler, err := mux.Build([]denco.Handler{
+ mux.GET("/", Index),
+ mux.GET("/user/:name", User),
+ mux.POST("/user/:name", User),
+ })
+ if err != nil {
+ panic(err)
+ }
+ log.Fatal(http.ListenAndServe(":8080", handler))
+}
+```
+
+## Using as URL router
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime/middleware/denco"
+)
+
+type route struct {
+ name string
+}
+
+func main() {
+ router := denco.New()
+ router.Build([]denco.Record{
+ {"/", &route{"root"}},
+ {"/user/:id", &route{"user"}},
+ {"/user/:name/:id", &route{"username"}},
+ {"/static/*filepath", &route{"static"}},
+ })
+
+ data, params, found := router.Lookup("/")
+ // print `&main.route{name:"root"}, denco.Params(nil), true`.
+ fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+ data, params, found = router.Lookup("/user/hoge")
+ // print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`.
+ fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+ data, params, found = router.Lookup("/user/hoge/7")
+ // print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`.
+ fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+
+ data, params, found = router.Lookup("/static/path/to/file")
+ // print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`.
+ fmt.Printf("%#v, %#v, %#v\n", data, params, found)
+}
+```
+
+See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details.
+
+## Getting the value of path parameter
+
+You can get the value of path parameter by 2 ways.
+
+1. Using [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method
+2. Find by loop
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime/middleware/denco"
+)
+
+func main() {
+ router := denco.New()
+ if err := router.Build([]denco.Record{
+ {"/user/:name/:id", "route1"},
+ }); err != nil {
+ panic(err)
+ }
+
+ // 1. Using denco.Params.Get method.
+ _, params, _ := router.Lookup("/user/alice/1")
+ name := params.Get("name")
+ if name != "" {
+ fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
+ }
+
+ // 2. Find by loop.
+ for _, param := range params {
+ if param.Name == "name" {
+ fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
+ }
+ }
+}
+```
+
+## URL patterns
+
+Denco's route matching strategy is "most nearly matching".
+
+When routes `/:name` and `/alice` have been built, URI `/alice` matches the route `/alice`, not `/:name`.
+Because URI `/alice` matches the route `/alice` more closely than `/:name`.
+
+For more example, when routes below have been built:
+
+```
+/user/alice
+/user/:name
+/user/:name/:id
+/user/alice/:id
+/user/:id/bob
+```
+
+Routes matching are:
+
+```
+/user/alice => "/user/alice" (no match with "/user/:name")
+/user/bob => "/user/:name"
+/user/naoina/1 => "/user/:name/1"
+/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id")
+/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id")
+/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob")
+```
+
+## Limitation
+
+Denco has some limitations below.
+
+* Number of param records (such as `/:name`) must be less than 2^22
+* Number of elements of internal slice must be less than 2^22
+
+## Benchmarks
+
+ cd $GOPATH/github.com/go-openapi/runtime/middleware/denco
+ go test -bench . -benchmem
+
+## License
+
+Denco is licensed under the MIT License.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
new file mode 100644
index 00000000..4377f77a
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go
@@ -0,0 +1,467 @@
+// Package denco provides fast URL router.
+package denco
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+const (
+ // ParamCharacter is a special character for path parameter.
+ ParamCharacter = ':'
+
+ // WildcardCharacter is a special character for wildcard path parameter.
+ WildcardCharacter = '*'
+
+ // TerminationCharacter is a special character for end of path.
+ TerminationCharacter = '#'
+
+ // SeparatorCharacter separates path segments.
+ SeparatorCharacter = '/'
+
+ // PathParamCharacter indicates a RESTCONF path param
+ PathParamCharacter = '='
+
+ // MaxSize is max size of records and internal slice.
+ MaxSize = (1 << 22) - 1
+)
+
+// Router represents a URL router.
+type Router struct {
+ param *doubleArray
+ // SizeHint expects the maximum number of path parameters in records to Build.
+ // SizeHint will be used to determine the capacity of the memory to allocate.
+ // By default, SizeHint will be determined from given records to Build.
+ SizeHint int
+
+ static map[string]interface{}
+}
+
+// New returns a new Router.
+func New() *Router {
+ return &Router{
+ SizeHint: -1,
+ static: make(map[string]interface{}),
+ param: newDoubleArray(),
+ }
+}
+
+// Lookup returns data and path parameters that associated with path.
+// params is a slice of the Param that arranged in the order in which parameters appeared.
+// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
+func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
+ if data, found = rt.static[path]; found {
+ return data, nil, true
+ }
+ if len(rt.param.node) == 1 {
+ return nil, nil, false
+ }
+ nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1)
+ if !found {
+ return nil, nil, false
+ }
+ for i := 0; i < len(params); i++ {
+ params[i].Name = nd.paramNames[i]
+ }
+ return nd.data, params, true
+}
+
+// Build builds URL router from records.
+func (rt *Router) Build(records []Record) error {
+ statics, params := makeRecords(records)
+ if len(params) > MaxSize {
+ return errors.New("denco: too many records")
+ }
+ if rt.SizeHint < 0 {
+ rt.SizeHint = 0
+ for _, p := range params {
+ size := 0
+ for _, k := range p.Key {
+ if k == ParamCharacter || k == WildcardCharacter {
+ size++
+ }
+ }
+ if size > rt.SizeHint {
+ rt.SizeHint = size
+ }
+ }
+ }
+ for _, r := range statics {
+ rt.static[r.Key] = r.Value
+ }
+ if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Param represents name and value of path parameter.
+type Param struct {
+ Name string
+ Value string
+}
+
+// Params represents the name and value of path parameters.
+type Params []Param
+
+// Get gets the first value associated with the given name.
+// If there are no values associated with the key, Get returns "".
+func (ps Params) Get(name string) string {
+ for _, p := range ps {
+ if p.Name == name {
+ return p.Value
+ }
+ }
+ return ""
+}
+
+type doubleArray struct {
+ bc []baseCheck
+ node []*node
+}
+
+func newDoubleArray() *doubleArray {
+ return &doubleArray{
+ bc: []baseCheck{0},
+ node: []*node{nil}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node.
+ }
+}
+
+// baseCheck contains BASE, CHECK and Extra flags.
+// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
+//
+// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
+//
+// |----------------------|--|--------|
+// 32 10 8 0
+type baseCheck uint32
+
+func (bc baseCheck) Base() int {
+ return int(bc >> 10)
+}
+
+func (bc *baseCheck) SetBase(base int) {
+ *bc |= baseCheck(base) << 10
+}
+
+func (bc baseCheck) Check() byte {
+ return byte(bc)
+}
+
+func (bc *baseCheck) SetCheck(check byte) {
+ *bc |= baseCheck(check)
+}
+
+func (bc baseCheck) IsEmpty() bool {
+ return bc&0xfffffcff == 0
+}
+
+func (bc baseCheck) IsSingleParam() bool {
+ return bc&paramTypeSingle == paramTypeSingle
+}
+
+func (bc baseCheck) IsWildcardParam() bool {
+ return bc&paramTypeWildcard == paramTypeWildcard
+}
+
+func (bc baseCheck) IsAnyParam() bool {
+ return bc&paramTypeAny != 0
+}
+
+func (bc *baseCheck) SetSingleParam() {
+ *bc |= (1 << 8)
+}
+
+func (bc *baseCheck) SetWildcardParam() {
+ *bc |= (1 << 9)
+}
+
+const (
+ paramTypeSingle = 0x0100
+ paramTypeWildcard = 0x0200
+ paramTypeAny = 0x0300
+)
+
+func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
+ indices := make([]uint64, 0, 1)
+ for i := 0; i < len(path); i++ {
+ if da.bc[idx].IsAnyParam() {
+ indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
+ }
+ c := path[i]
+ if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
+ goto BACKTRACKING
+ }
+ }
+ if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
+ return da.node[da.bc[next].Base()], params, true
+ }
+
+BACKTRACKING:
+ for j := len(indices) - 1; j >= 0; j-- {
+ i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
+ if da.bc[idx].IsSingleParam() {
+ nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter)
+ if nextIdx >= len(da.bc) {
+ break
+ }
+
+ next := NextSeparator(path, i)
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:next]})
+ if nd, nextNextParams, found := da.lookup(path[next:], nextParams, nextIdx); found {
+ return nd, nextNextParams, true
+ }
+ }
+
+ if da.bc[idx].IsWildcardParam() {
+ nextIdx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
+ nextParams := params
+ nextParams = append(nextParams, Param{Value: path[i:]})
+ return da.node[da.bc[nextIdx].Base()], nextParams, true
+ }
+ }
+ return nil, nil, false
+}
+
+// build builds double-array from records.
+func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error {
+ sort.Stable(recordSlice(srcs))
+ base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
+ if err != nil {
+ return err
+ }
+ if leaf != nil {
+ nd, err := makeNode(leaf)
+ if err != nil {
+ return err
+ }
+ da.bc[idx].SetBase(len(da.node))
+ da.node = append(da.node, nd)
+ }
+ for _, sib := range siblings {
+ da.setCheck(nextIndex(base, sib.c), sib.c)
+ }
+ for _, sib := range siblings {
+ records := srcs[sib.start:sib.end]
+ switch sib.c {
+ case ParamCharacter:
+ for _, r := range records {
+ next := NextSeparator(r.Key, depth+1)
+ name := r.Key[depth+1 : next]
+ r.paramNames = append(r.paramNames, name)
+ r.Key = r.Key[next:]
+ }
+ da.bc[idx].SetSingleParam()
+ if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+ return err
+ }
+ case WildcardCharacter:
+ r := records[0]
+ name := r.Key[depth+1 : len(r.Key)-1]
+ r.paramNames = append(r.paramNames, name)
+ r.Key = ""
+ da.bc[idx].SetWildcardParam()
+ if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+ return err
+ }
+ default:
+ if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// setBase sets BASE.
+func (da *doubleArray) setBase(i, base int) {
+ da.bc[i].SetBase(base)
+}
+
+// setCheck sets CHECK.
+func (da *doubleArray) setCheck(i int, check byte) {
+ da.bc[i].SetCheck(check)
+}
+
+// findEmptyIndex returns an index of unused BASE/CHECK node.
+func (da *doubleArray) findEmptyIndex(start int) int {
+ i := start
+ for ; i < len(da.bc); i++ {
+ if da.bc[i].IsEmpty() {
+ break
+ }
+ }
+ return i
+}
+
+// findBase returns good BASE.
+func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
+ for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
+ base = nextIndex(idx, firstChar)
+ if _, used := usedBase[base]; used {
+ continue
+ }
+ i := 0
+ for ; i < len(siblings); i++ {
+ next := nextIndex(base, siblings[i].c)
+ if len(da.bc) <= next {
+ da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
+ }
+ if !da.bc[next].IsEmpty() {
+ break
+ }
+ }
+ if i == len(siblings) {
+ break
+ }
+ }
+ usedBase[base] = struct{}{}
+ return base
+}
+
+func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
+ siblings, leaf, err = makeSiblings(records, depth)
+ if err != nil {
+ return -1, nil, nil, err
+ }
+ if len(siblings) < 1 {
+ return -1, nil, leaf, nil
+ }
+ base = da.findBase(siblings, idx, usedBase)
+ if base > MaxSize {
+ return -1, nil, nil, errors.New("denco: too many elements of internal slice")
+ }
+ da.setBase(idx, base)
+ return base, siblings, leaf, err
+}
+
+// node represents a node of Double-Array.
+type node struct {
+ data interface{}
+
+ // Names of path parameters.
+ paramNames []string
+}
+
+// makeNode returns a new node from record.
+func makeNode(r *record) (*node, error) {
+ dups := make(map[string]bool)
+ for _, name := range r.paramNames {
+ if dups[name] {
+ return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key)
+ }
+ dups[name] = true
+ }
+ return &node{data: r.Value, paramNames: r.paramNames}, nil
+}
+
+// sibling represents an intermediate data of build for Double-Array.
+type sibling struct {
+ // An index of start of duplicated characters.
+ start int
+
+ // An index of end of duplicated characters.
+ end int
+
+ // A character of sibling.
+ c byte
+}
+
+// nextIndex returns a next index of array of BASE/CHECK.
+func nextIndex(base int, c byte) int {
+ return base ^ int(c)
+}
+
+// makeSiblings returns slice of sibling.
+func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) {
+ var (
+ pc byte
+ n int
+ )
+ for i, r := range records {
+ if len(r.Key) <= depth {
+ leaf = r
+ continue
+ }
+ c := r.Key[depth]
+ switch {
+ case pc < c:
+ sib = append(sib, sibling{start: i, c: c})
+ case pc == c:
+ continue
+ default:
+ return nil, nil, errors.New("denco: BUG: routing table hasn't been sorted")
+ }
+ if n > 0 {
+ sib[n-1].end = i
+ }
+ pc = c
+ n++
+ }
+ if n == 0 {
+ return nil, leaf, nil
+ }
+ sib[n-1].end = len(records)
+ return sib, leaf, nil
+}
+
+// Record represents a record data for router construction.
+type Record struct {
+ // Key for router construction.
+ Key string
+
+ // Result value for Key.
+ Value interface{}
+}
+
+// NewRecord returns a new Record.
+func NewRecord(key string, value interface{}) Record {
+ return Record{
+ Key: key,
+ Value: value,
+ }
+}
+
+// record represents a record that use to build the Double-Array.
+type record struct {
+ Record
+ paramNames []string
+}
+
+// makeRecords returns the records that use to build Double-Arrays.
+func makeRecords(srcs []Record) (statics, params []*record) {
+ termChar := string(TerminationCharacter)
+ paramPrefix := string(SeparatorCharacter) + string(ParamCharacter)
+ wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
+ restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
+ for _, r := range srcs {
+ if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
+ r.Key += termChar
+ params = append(params, &record{Record: r})
+ } else {
+ statics = append(statics, &record{Record: r})
+ }
+ }
+ return statics, params
+}
+
+// recordSlice represents a slice of Record for sort and implements the sort.Interface.
+type recordSlice []*record
+
+// Len implements the sort.Interface.Len.
+func (rs recordSlice) Len() int {
+ return len(rs)
+}
+
+// Less implements the sort.Interface.Less.
+func (rs recordSlice) Less(i, j int) bool {
+ return rs[i].Key < rs[j].Key
+}
+
+// Swap implements the sort.Interface.Swap.
+func (rs recordSlice) Swap(i, j int) {
+ rs[i], rs[j] = rs[j], rs[i]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
new file mode 100644
index 00000000..0886713c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
@@ -0,0 +1,106 @@
+package denco
+
+import (
+ "net/http"
+)
+
+// Mux represents a multiplexer for HTTP request.
+type Mux struct{}
+
+// NewMux returns a new Mux.
+func NewMux() *Mux {
+ return &Mux{}
+}
+
+// GET is shorthand of Mux.Handler("GET", path, handler).
+func (m *Mux) GET(path string, handler HandlerFunc) Handler {
+ return m.Handler("GET", path, handler)
+}
+
+// POST is shorthand of Mux.Handler("POST", path, handler).
+func (m *Mux) POST(path string, handler HandlerFunc) Handler {
+ return m.Handler("POST", path, handler)
+}
+
+// PUT is shorthand of Mux.Handler("PUT", path, handler).
+func (m *Mux) PUT(path string, handler HandlerFunc) Handler {
+ return m.Handler("PUT", path, handler)
+}
+
+// HEAD is shorthand of Mux.Handler("HEAD", path, handler).
+func (m *Mux) HEAD(path string, handler HandlerFunc) Handler {
+ return m.Handler("HEAD", path, handler)
+}
+
+// Handler returns a handler for HTTP method.
+func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler {
+ return Handler{
+ Method: method,
+ Path: path,
+ Func: handler,
+ }
+}
+
+// Build groups the given handlers by HTTP method, compiles one denco Router
+// per method, and returns the resulting http.Handler. It fails with the
+// first router construction error encountered.
+func (m *Mux) Build(handlers []Handler) (http.Handler, error) {
+ recordMap := make(map[string][]Record)
+ for _, h := range handlers {
+ recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func))
+ }
+ mux := newServeMux()
+ // NOTE(review): the loop variable m shadows the method receiver here;
+ // harmless because the receiver is not used below, but easy to misread.
+ for m, records := range recordMap {
+ router := New()
+ if err := router.Build(records); err != nil {
+ return nil, err
+ }
+ mux.routers[m] = router
+ }
+ return mux, nil
+}
+
+// Handler represents a handler of HTTP request.
+type Handler struct {
+ // Method is an HTTP method.
+ Method string
+
+ // Path is a routing path for handler.
+ Path string
+
+ // Func is a function of handler of HTTP request.
+ Func HandlerFunc
+}
+
+// The HandlerFunc type is aliased to type of handler function.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params)
+
+type serveMux struct {
+ routers map[string]*Router
+}
+
+func newServeMux() *serveMux {
+ return &serveMux{
+ routers: make(map[string]*Router),
+ }
+}
+
+// ServeHTTP implements http.Handler interface.
+func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ handler, params := mux.handler(r.Method, r.URL.Path)
+ handler(w, r, params)
+}
+
+// handler returns the registered HandlerFunc and path parameters for the
+// given method and path, falling back to the package-level NotFound handler
+// (with nil params) when no router or route matches.
+func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) {
+ if router, found := mux.routers[method]; found {
+ if handler, params, found := router.Lookup(path); found {
+ // Safe for routers built via Mux.Build, which only stores
+ // HandlerFunc values; the assertion panics otherwise.
+ return handler.(HandlerFunc), params
+ }
+ }
+ return NotFound, nil
+}
+
+// NotFound replies to the request with an HTTP 404 not found error.
+// NotFound is called when unknown HTTP method or a handler not found.
+// If you want to use the your own NotFound handler, please overwrite this variable.
+var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) {
+ http.NotFound(w, r)
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
new file mode 100644
index 00000000..edc1f6ab
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
@@ -0,0 +1,12 @@
+package denco
+
+// NextSeparator returns the index of the next separator — '/' or the
+// TerminationCharacter — in path at or after start. If no separator is
+// found, len(path) is returned.
+func NextSeparator(path string, start int) int {
+ for start < len(path) {
+ if c := path[start]; c == '/' || c == TerminationCharacter {
+ break
+ }
+ start++
+ }
+ return start
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go
new file mode 100644
index 00000000..836a9885
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go
@@ -0,0 +1,63 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package middleware provides the library with helper functions for serving swagger APIs.
+
+Pseudo middleware handler
+
+ import (
+ "net/http"
+
+ "github.com/go-openapi/errors"
+ )
+
+ func newCompleteMiddleware(ctx *Context) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ // use context to lookup routes
+ if matched, ok := ctx.RouteInfo(r); ok {
+
+ if matched.NeedsAuth() {
+ if _, err := ctx.Authorize(r, matched); err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
+ }
+
+ bound, validation := ctx.BindAndValidate(r, matched)
+ if validation != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, validation)
+ return
+ }
+
+ result, err := matched.Handler.Handle(bound)
+ if err != nil {
+ ctx.Respond(rw, r, matched.Produces, matched, err)
+ return
+ }
+
+ ctx.Respond(rw, r, matched.Produces, matched, result)
+ return
+ }
+
+ // Not found, check if it exists in the other methods first
+ if others := ctx.AllowedMethods(r); len(others) > 0 {
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+ return
+ }
+ ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+ })
+ }
+*/
+package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
new file mode 100644
index 00000000..df073c87
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -0,0 +1,332 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+ "net/http"
+ "strings"
+ "time"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // Classify every octet per the RFC 2616 basic grammar:
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// Copy returns a shallow copy of the header: the map is new, but the
+// value slices are shared with the original, so mutating a slice through
+// either map is visible in both.
+func Copy(header http.Header) http.Header {
+ h := make(http.Header)
+ for k, vs := range header {
+ h[k] = vs
+ }
+ return h
+}
+
+var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
+
+// ParseTime parses the header as time. The zero value is returned if the
+// header is not present or there is an error parsing the
+// header.
+func ParseTime(header http.Header, key string) time.Time {
+ if s := header.Get(key); s != "" {
+ for _, layout := range timeLayouts {
+ if t, err := time.Parse(layout, s); err == nil {
+ return t.UTC()
+ }
+ }
+ }
+ return time.Time{}
+}
+
+// ParseList parses a comma separated list of values. Commas are ignored in
+// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is
+// trimmed.
+func ParseList(header http.Header, key string) []string {
+ var result []string
+ for _, s := range header[http.CanonicalHeaderKey(key)] {
+ // Single-pass scanner over s; begin/end delimit the current item,
+ // escape/quote track whether we are inside a quoted-string.
+ begin := 0
+ end := 0
+ escape := false
+ quote := false
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ // Byte after a backslash inside quotes: always part of the item.
+ escape = false
+ end = i + 1
+ case quote:
+ switch b {
+ case '\\':
+ escape = true
+ case '"':
+ quote = false
+ }
+ end = i + 1
+ case b == '"':
+ quote = true
+ end = i + 1
+ case octetTypes[b]&isSpace != 0:
+ // Leading whitespace: advance begin; trailing whitespace is
+ // excluded by simply not moving end.
+ if begin == end {
+ begin = i + 1
+ end = begin
+ }
+ case b == ',':
+ // Item delimiter (outside quotes): flush the current item.
+ if begin < end {
+ result = append(result, s[begin:end])
+ }
+ begin = i + 1
+ end = begin
+ default:
+ end = i + 1
+ }
+ }
+ // Flush the final item of this header line, if any.
+ if begin < end {
+ result = append(result, s[begin:end])
+ }
+ }
+ return result
+}
+
+// ParseValueAndParams parses a comma separated list of values with optional
+// semicolon separated name-value pairs. Content-Type and Content-Disposition
+// headers are in this format.
+func ParseValueAndParams(header http.Header, key string) (string, map[string]string) {
+ return parseValueAndParams(header.Get(key))
+}
+
+func parseValueAndParams(s string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s = expectTokenSlash(s)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = skipSpace(s)
+ for strings.HasPrefix(s, ";") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+// AcceptSpec ...
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept2 parses an Accept-style header into AcceptSpecs using the
+// generic list/params parser. Entries without a "q" parameter default to
+// quality 1.0; entries whose quality fails to parse (q < 0) are dropped.
+func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) {
+ for _, en := range ParseList(header, key) {
+ v, p := parseValueAndParams(en)
+ var spec AcceptSpec
+ spec.Value = v
+ spec.Q = 1.0
+ // NOTE(review): p is never nil — parseValueAndParams always
+ // allocates the params map — so this guard is redundant but harmless.
+ if p != nil {
+ if q, ok := p["q"]; ok {
+ spec.Q, _ = expectQuality(q)
+ }
+ }
+ if spec.Q < 0.0 {
+ continue
+ }
+ specs = append(specs, spec)
+ }
+
+ return
+}
+
+// ParseAccept parses Accept* headers into AcceptSpecs, scanning each header
+// line by hand (token/slash value, optional ";...q=" quality, comma-separated
+// entries). Entries with an unparsable value or negative quality are skipped.
+// NOTE(review): unlike ParseList, this indexes header[key] directly, so key
+// must already be in canonical form (e.g. "Accept") — confirm at call sites.
+func ParseAccept(header http.Header, key string) []AcceptSpec {
+ var specs []AcceptSpec
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ // Skip any parameters before "q=" (they are ignored here).
+ for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") {
+ s = skipSpace(s[1:])
+ }
+ if strings.HasPrefix(s, "q=") {
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ }
+
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+
+ return specs
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+// expectQuality parses a quality value (e.g. "0", "1", "0.8") from the start
+// of s and returns it with the unparsed remainder. q is -1 (with empty rest)
+// when s does not begin with a valid quality value.
+// NOTE(review): values above 1 such as "1.5" are not rejected here — callers
+// only test for the q < 0 sentinel; RFC 7231 caps qvalue at 1.000, so
+// confirm whether out-of-range values should be clamped upstream.
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ // q is already 0
+ s = s[1:]
+ case s[0] == '1':
+ s = s[1:]
+ q = 1
+ case s[0] == '.':
+ // q is already 0
+ default:
+ return -1, ""
+ }
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ // Accumulate the fractional digits as n/d.
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i++; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
new file mode 100644
index 00000000..a9b6f27d
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+package middleware
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/go-openapi/runtime/middleware/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned. When nothing matches at all, the
+// fallback is "identity".
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ // Strict > keeps the earlier offer on equal quality.
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ // A best match with an explicit q=0 means the client forbids it.
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
+
+// NegotiateContentType returns the best offered content type for the request's
+// Accept header. If two offers match with equal weight, then the more specific
+// offer is preferred. For example, text/* trumps */*. If two offers match
+// with equal weight and specificity, then the offer earlier in the list is
+// preferred. If no offers match, then defaultOffer is returned.
+func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
+ bestOffer := defaultOffer
+ bestQ := -1.0
+ // bestWild tracks specificity: 3 = none yet, 2 = */*, 1 = type/*, 0 = exact.
+ bestWild := 3
+ specs := header.ParseAccept(r.Header, "Accept")
+ for _, rawOffer := range offers {
+ offer := normalizeOffer(rawOffer)
+ // No Accept header: just return the first offer.
+ if len(specs) == 0 {
+ return rawOffer
+ }
+ for _, spec := range specs {
+ switch {
+ case spec.Q == 0.0:
+ // q=0 means explicitly not acceptable; ignore
+ case spec.Q < bestQ:
+ // worse quality than the current best match; skip
+ case spec.Value == "*/*":
+ if spec.Q > bestQ || bestWild > 2 {
+ bestQ = spec.Q
+ bestWild = 2
+ bestOffer = rawOffer
+ }
+ case strings.HasSuffix(spec.Value, "/*"):
+ if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
+ (spec.Q > bestQ || bestWild > 1) {
+ bestQ = spec.Q
+ bestWild = 1
+ bestOffer = rawOffer
+ }
+ default:
+ if spec.Value == offer &&
+ (spec.Q > bestQ || bestWild > 0) {
+ bestQ = spec.Q
+ bestWild = 0
+ bestOffer = rawOffer
+ }
+ }
+ }
+ }
+ return bestOffer
+}
+
+func normalizeOffers(orig []string) (norm []string) {
+ for _, o := range orig {
+ norm = append(norm, normalizeOffer(o))
+ }
+ return
+}
+
+func normalizeOffer(orig string) string {
+ return strings.SplitN(orig, ";", 2)[0]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
new file mode 100644
index 00000000..bc6942a0
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
@@ -0,0 +1,67 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/go-openapi/runtime"
+)
+
+type errorResp struct {
+ code int
+ response interface{}
+ headers http.Header
+}
+
+// WriteResponse writes the stored headers, the status code (defaulting to
+// 500 when none was set), and the serialized response body using the
+// negotiated producer. Produce failures are logged, not returned, since the
+// status line has already been written.
+func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
+ for k, v := range e.headers {
+ for _, val := range v {
+ rw.Header().Add(k, val)
+ }
+ }
+ if e.code > 0 {
+ rw.WriteHeader(e.code)
+ } else {
+ rw.WriteHeader(http.StatusInternalServerError)
+ }
+ if err := producer.Produce(rw, e.response); err != nil {
+ Logger.Printf("failed to write error response: %v", err)
+ }
+}
+
+// NotImplemented the error response when the response is not implemented
+func NotImplemented(message string) Responder {
+ return Error(http.StatusNotImplemented, message)
+}
+
+// Error creates a generic responder for returning errors, the data will be serialized
+// with the matching producer for the request
+func Error(code int, data interface{}, headers ...http.Header) Responder {
+ var hdr http.Header
+ for _, h := range headers {
+ for k, v := range h {
+ if hdr == nil {
+ hdr = make(http.Header)
+ }
+ hdr[k] = v
+ }
+ }
+ return &errorResp{
+ code: code,
+ response: data,
+ headers: hdr,
+ }
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go
new file mode 100644
index 00000000..1175a63c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/operation.go
@@ -0,0 +1,30 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import "net/http"
+
+// NewOperationExecutor creates a context aware middleware that handles the operations after routing
+func NewOperationExecutor(ctx *Context) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ // use context to lookup routes
+ route, rCtx, _ := ctx.RouteInfo(r)
+ if rCtx != nil {
+ r = rCtx
+ }
+
+ route.Handler.ServeHTTP(rw, r)
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go
new file mode 100644
index 00000000..9c3353a9
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go
@@ -0,0 +1,491 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+
+ "github.com/go-openapi/runtime"
+)
+
+const defaultMaxMemory = 32 << 20
+
+const (
+ typeString = "string"
+ typeArray = "array"
+)
+
+var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
+ binder := new(untypedParamBinder)
+ binder.Name = param.Name
+ binder.parameter = ¶m
+ binder.formats = formats
+ if param.In != "body" {
+ binder.validator = validate.NewParamValidator(¶m, formats)
+ } else {
+ binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
+ }
+
+ return binder
+}
+
+type untypedParamBinder struct {
+ parameter *spec.Parameter
+ formats strfmt.Registry
+ Name string
+ validator validate.EntityValidator
+}
+
+func (p *untypedParamBinder) Type() reflect.Type {
+ return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
+}
+
+func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
+ switch tpe {
+ case "boolean":
+ return reflect.TypeOf(true)
+
+ case typeString:
+ if tt, ok := p.formats.GetType(format); ok {
+ return tt
+ }
+ return reflect.TypeOf("")
+
+ case "integer":
+ switch format {
+ case "int8":
+ return reflect.TypeOf(int8(0))
+ case "int16":
+ return reflect.TypeOf(int16(0))
+ case "int32":
+ return reflect.TypeOf(int32(0))
+ case "int64":
+ return reflect.TypeOf(int64(0))
+ default:
+ return reflect.TypeOf(int64(0))
+ }
+
+ case "number":
+ switch format {
+ case "float":
+ return reflect.TypeOf(float32(0))
+ case "double":
+ return reflect.TypeOf(float64(0))
+ }
+
+ case typeArray:
+ if items == nil {
+ return nil
+ }
+ itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
+ if itemsType == nil {
+ return nil
+ }
+ return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
+
+ case "file":
+ return reflect.TypeOf(&runtime.File{}).Elem()
+
+ case "object":
+ return reflect.TypeOf(map[string]interface{}{})
+ }
+ return nil
+}
+
+func (p *untypedParamBinder) allowsMulti() bool {
+ return p.parameter.In == "query" || p.parameter.In == "formData"
+}
+
+// readValue extracts the raw string values for this parameter from values.
+// It returns (data, custom, hasKey, err): custom reports that the target's
+// TextUnmarshaler already consumed the value (so no further binding is
+// needed), and hasKey reports whether the parameter name was present at all.
+func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
+ name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
+ if tpe == typeArray {
+ if cf == "multi" {
+ // collectionFormat "multi" is only valid for query/formData.
+ if !p.allowsMulti() {
+ return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
+ }
+ vv, hasKey, _ := values.GetOK(name)
+ return vv, false, hasKey, nil
+ }
+
+ v, hk, hv := values.GetOK(name)
+ if !hv {
+ return nil, false, hk, nil
+ }
+ // For delimited formats only the last occurrence of the key is split.
+ d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
+ return d, c, hk, e
+ }
+
+ vv, hk, _ := values.GetOK(name)
+ return vv, false, hk, nil
+}
+
+func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
+ // fmt.Println("binding", p.name, "as", p.Type())
+ switch p.parameter.In {
+ case "query":
+ data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
+ if err != nil {
+ return err
+ }
+ if custom {
+ return nil
+ }
+
+ return p.bindValue(data, hasKey, target)
+
+ case "header":
+ data, custom, hasKey, err := p.readValue(runtime.Values(request.Header), target)
+ if err != nil {
+ return err
+ }
+ if custom {
+ return nil
+ }
+ return p.bindValue(data, hasKey, target)
+
+ case "path":
+ data, custom, hasKey, err := p.readValue(routeParams, target)
+ if err != nil {
+ return err
+ }
+ if custom {
+ return nil
+ }
+ return p.bindValue(data, hasKey, target)
+
+ case "formData":
+ var err error
+ var mt string
+
+ mt, _, e := runtime.ContentType(request.Header)
+ if e != nil {
+ // because of the interface conversion go thinks the error is not nil
+ // so we first check for nil and then set the err var if it's not nil
+ err = e
+ }
+
+ if err != nil {
+ return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"})
+ }
+
+ if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" {
+ return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"})
+ }
+
+ if mt == "multipart/form-data" {
+ if err = request.ParseMultipartForm(defaultMaxMemory); err != nil {
+ return errors.NewParseError(p.Name, p.parameter.In, "", err)
+ }
+ }
+
+ if err = request.ParseForm(); err != nil {
+ return errors.NewParseError(p.Name, p.parameter.In, "", err)
+ }
+
+ if p.parameter.Type == "file" {
+ file, header, ffErr := request.FormFile(p.parameter.Name)
+ if ffErr != nil {
+ if p.parameter.Required {
+ return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
+ }
+
+ return nil
+ }
+
+ target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header}))
+ return nil
+ }
+
+ if request.MultipartForm != nil {
+ data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target)
+ if rvErr != nil {
+ return rvErr
+ }
+ if custom {
+ return nil
+ }
+ return p.bindValue(data, hasKey, target)
+ }
+ data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target)
+ if err != nil {
+ return err
+ }
+ if custom {
+ return nil
+ }
+ return p.bindValue(data, hasKey, target)
+
+ case "body":
+ newValue := reflect.New(target.Type())
+ if !runtime.HasBody(request) {
+ if p.parameter.Default != nil {
+ target.Set(reflect.ValueOf(p.parameter.Default))
+ }
+
+ return nil
+ }
+ if err := consumer.Consume(request.Body, newValue.Interface()); err != nil {
+ if err == io.EOF && p.parameter.Default != nil {
+ target.Set(reflect.ValueOf(p.parameter.Default))
+ return nil
+ }
+ tpe := p.parameter.Type
+ if p.parameter.Format != "" {
+ tpe = p.parameter.Format
+ }
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, nil)
+ }
+ target.Set(reflect.Indirect(newValue))
+ return nil
+ default:
+ return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In))
+ }
+}
+
+func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error {
+ if p.parameter.Type == typeArray {
+ return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey)
+ }
+ var d string
+ if len(data) > 0 {
+ d = data[len(data)-1]
+ }
+ return p.setFieldValue(target, p.parameter.Default, d, hasKey)
+}
+
+func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { //nolint:gocyclo
+ tpe := p.parameter.Type
+ if p.parameter.Format != "" {
+ tpe = p.parameter.Format
+ }
+
+ if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil {
+ return errors.Required(p.Name, p.parameter.In, data)
+ }
+
+ ok, err := p.tryUnmarshaler(target, defaultValue, data)
+ if err != nil {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if ok {
+ return nil
+ }
+
+ defVal := reflect.Zero(target.Type())
+ if defaultValue != nil {
+ defVal = reflect.ValueOf(defaultValue)
+ }
+
+ if tpe == "byte" {
+ if data == "" {
+ if target.CanSet() {
+ target.SetBytes(defVal.Bytes())
+ }
+ return nil
+ }
+
+ b, err := base64.StdEncoding.DecodeString(data)
+ if err != nil {
+ b, err = base64.URLEncoding.DecodeString(data)
+ if err != nil {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ }
+ if target.CanSet() {
+ target.SetBytes(b)
+ }
+ return nil
+ }
+
+ switch target.Kind() { //nolint:exhaustive // we want to check only types that map from a swagger parameter
+ case reflect.Bool:
+ if data == "" {
+ if target.CanSet() {
+ target.SetBool(defVal.Bool())
+ }
+ return nil
+ }
+ b, err := swag.ConvertBool(data)
+ if err != nil {
+ return err
+ }
+ if target.CanSet() {
+ target.SetBool(b)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if data == "" {
+ if target.CanSet() {
+ rd := defVal.Convert(reflect.TypeOf(int64(0)))
+ target.SetInt(rd.Int())
+ }
+ return nil
+ }
+ i, err := strconv.ParseInt(data, 10, 64)
+ if err != nil {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.OverflowInt(i) {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.CanSet() {
+ target.SetInt(i)
+ }
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if data == "" {
+ if target.CanSet() {
+ rd := defVal.Convert(reflect.TypeOf(uint64(0)))
+ target.SetUint(rd.Uint())
+ }
+ return nil
+ }
+ u, err := strconv.ParseUint(data, 10, 64)
+ if err != nil {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.OverflowUint(u) {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.CanSet() {
+ target.SetUint(u)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ if data == "" {
+ if target.CanSet() {
+ rd := defVal.Convert(reflect.TypeOf(float64(0)))
+ target.SetFloat(rd.Float())
+ }
+ return nil
+ }
+ f, err := strconv.ParseFloat(data, 64)
+ if err != nil {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.OverflowFloat(f) {
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ if target.CanSet() {
+ target.SetFloat(f)
+ }
+
+ case reflect.String:
+ value := data
+ if value == "" {
+ value = defVal.String()
+ }
+ // validate string
+ if target.CanSet() {
+ target.SetString(value)
+ }
+
+ case reflect.Ptr:
+ if data == "" && defVal.Kind() == reflect.Ptr {
+ if target.CanSet() {
+ target.Set(defVal)
+ }
+ return nil
+ }
+ newVal := reflect.New(target.Type().Elem())
+ if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil {
+ return err
+ }
+ if target.CanSet() {
+ target.Set(newVal)
+ }
+
+ default:
+ return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
+ }
+ return nil
+}
+
+func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) {
+ if !target.CanSet() {
+ return false, nil
+ }
+ // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more
+ if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) {
+ if defaultValue != nil && len(data) == 0 {
+ target.Set(reflect.ValueOf(defaultValue))
+ return true, nil
+ }
+ value := reflect.New(target.Type())
+ if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil {
+ return true, err
+ }
+ target.Set(reflect.Indirect(value))
+ return true, nil
+ }
+ return false, nil
+}
+
+func (p *untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) {
+ ok, err := p.tryUnmarshaler(target, p.parameter.Default, data)
+ if err != nil {
+ return nil, true, err
+ }
+ if ok {
+ return nil, true, nil
+ }
+
+ return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil
+}
+
+func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error {
+ sz := len(data)
+ if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil {
+ return errors.Required(p.Name, p.parameter.In, data)
+ }
+
+ defVal := reflect.Zero(target.Type())
+ if defaultValue != nil {
+ defVal = reflect.ValueOf(defaultValue)
+ }
+
+ if !target.CanSet() {
+ return nil
+ }
+ if sz == 0 {
+ target.Set(defVal)
+ return nil
+ }
+
+ value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz)
+
+ for i := 0; i < sz; i++ {
+ if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil {
+ return err
+ }
+ }
+
+ target.Set(value)
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
new file mode 100644
index 00000000..ef75e744
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go
@@ -0,0 +1,80 @@
+package middleware
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "path"
+)
+
+// RapiDocOpts configures the RapiDoc middlewares
+type RapiDocOpts struct {
+ // BasePath for the UI, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RapiDocURL points to the js asset that generates the rapidoc site.
+ //
+ // Defaults to https://unpkg.com/rapidoc/dist/rapidoc-min.js
+ RapiDocURL string
+}
+
+func (r *RapiDocOpts) EnsureDefaults() {
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // rapidoc-specifics
+ if r.RapiDocURL == "" {
+ r.RapiDocURL = rapidocLatest
+ }
+ if r.Template == "" {
+ r.Template = rapidocTemplate
+ }
+}
+
+// RapiDoc creates a middleware to serve a documentation site for a swagger spec.
+//
+// This allows for altering the spec before starting the http listener.
+func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler {
+ opts.EnsureDefaults()
+
+ pth := path.Join(opts.BasePath, opts.Path)
+ tmpl := template.Must(template.New("rapidoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
+
+ return serveUI(pth, assets.Bytes(), next)
+}
+
+const (
+ rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js"
+ rapidocTemplate = `<!doctype html>
+<html>
+<head>
+  <title>{{ .Title }}</title>
+  <meta charset="utf-8"> <!-- Important: rapi-doc uses utf8 characters -->
+  <script type="module" src="{{ .RapiDocURL }}"></script>
+</head>
+<body>
+  <rapi-doc spec-url="{{ .SpecURL }}"> </rapi-doc>
+</body>
+</html>
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
new file mode 100644
index 00000000..b96b01e7
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
@@ -0,0 +1,94 @@
+package middleware
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "path"
+)
+
+// RedocOpts configures the Redoc middlewares
+type RedocOpts struct {
+ // BasePath for the UI, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // RedocURL points to the js that generates the redoc site.
+ //
+ // Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
+ RedocURL string
+}
+
+// EnsureDefaults in case some options are missing
+func (r *RedocOpts) EnsureDefaults() {
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // redoc-specifics
+ if r.RedocURL == "" {
+ r.RedocURL = redocLatest
+ }
+ if r.Template == "" {
+ r.Template = redocTemplate
+ }
+}
+
+// Redoc creates a middleware to serve a documentation site for a swagger spec.
+//
+// This allows for altering the spec before starting the http listener.
+func Redoc(opts RedocOpts, next http.Handler) http.Handler {
+ opts.EnsureDefaults()
+
+ pth := path.Join(opts.BasePath, opts.Path)
+ tmpl := template.Must(template.New("redoc").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
+
+ return serveUI(pth, assets.Bytes(), next)
+}
+
+const (
+ redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"
+ redocTemplate = `<!DOCTYPE html>
+<html>
+  <head>
+    <title>{{ .Title }}</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
+
+    <!--
+    ReDoc doesn't change outer page styles
+    -->
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <redoc spec-url='{{ .SpecURL }}'></redoc>
+    <script src="{{ .RedocURL }}"> </script>
+  </body>
+</html>
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go
new file mode 100644
index 00000000..82e14366
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -0,0 +1,117 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "net/http"
+ "reflect"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/logger"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// UntypedRequestBinder binds and validates the data from a http request
+type UntypedRequestBinder struct {
+ Spec *spec.Swagger
+ Parameters map[string]spec.Parameter
+ Formats strfmt.Registry
+ paramBinders map[string]*untypedParamBinder
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
+}
+
+// NewUntypedRequestBinder creates a new binder for reading a request.
+func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder {
+ binders := make(map[string]*untypedParamBinder)
+ for fieldName, param := range parameters {
+ binders[fieldName] = newUntypedParamBinder(param, spec, formats)
+ }
+ return &UntypedRequestBinder{
+ Parameters: parameters,
+ paramBinders: binders,
+ Spec: spec,
+ Formats: formats,
+ debugLogf: debugLogfFunc(nil),
+ }
+}
+
+// Bind performs the data binding and validation.
+func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error {
+ val := reflect.Indirect(reflect.ValueOf(data))
+ isMap := val.Kind() == reflect.Map
+ var result []error
+ o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
+ for fieldName, param := range o.Parameters {
+ binder := o.paramBinders[fieldName]
+ o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
+ var target reflect.Value
+ if !isMap {
+ binder.Name = fieldName
+ target = val.FieldByName(fieldName)
+ }
+
+ if isMap {
+ tpe := binder.Type()
+ if tpe == nil {
+ if param.Schema.Type.Contains(typeArray) {
+ tpe = reflect.TypeOf([]interface{}{})
+ } else {
+ tpe = reflect.TypeOf(map[string]interface{}{})
+ }
+ }
+ target = reflect.Indirect(reflect.New(tpe))
+ }
+
+ if !target.IsValid() {
+ result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name))
+ continue
+ }
+
+ if err := binder.Bind(request, routeParams, consumer, target); err != nil {
+ result = append(result, err)
+ continue
+ }
+
+ if binder.validator != nil {
+ rr := binder.validator.Validate(target.Interface())
+ if rr != nil && rr.HasErrors() {
+ result = append(result, rr.AsError())
+ }
+ }
+
+ if isMap {
+ val.SetMapIndex(reflect.ValueOf(param.Name), target)
+ }
+ }
+
+ if len(result) > 0 {
+ return errors.CompositeValidationError(result...)
+ }
+
+ return nil
+}
+
+// SetLogger allows for injecting a logger to catch debug entries.
+//
+// The logger is enabled in DEBUG mode only.
+func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) {
+ o.debugLogf = debugLogfFunc(lg)
+}
+
+func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) {
+ o.debugLogf = fn
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go
new file mode 100644
index 00000000..3a6aee90
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/router.go
@@ -0,0 +1,531 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ fpath "path"
+ "regexp"
+ "strings"
+
+ "github.com/go-openapi/runtime/logger"
+ "github.com/go-openapi/runtime/security"
+ "github.com/go-openapi/swag"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/loads"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/runtime/middleware/denco"
+)
+
+// RouteParam is an object to capture route params in a framework-agnostic way.
+// Implementations of the muxer should use these route params to communicate with the
+// swagger framework.
+type RouteParam struct {
+ Name string
+ Value string
+}
+
+// RouteParams the collection of route params
+type RouteParams []RouteParam
+
+// Get gets the value for the route param for the specified key
+func (r RouteParams) Get(name string) string {
+ vv, _, _ := r.GetOK(name)
+ if len(vv) > 0 {
+ return vv[len(vv)-1]
+ }
+ return ""
+}
+
+// GetOK gets the value but also returns booleans to indicate if a key or value
+// is present. This aids in validation and satisfies an interface in use there
+//
+// The returned values are: data, has key, has value
+func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
+ for _, p := range r {
+ if p.Name == name {
+ return []string{p.Value}, true, p.Value != ""
+ }
+ }
+ return nil, false, false
+}
+
+// NewRouter creates a new context-aware router middleware
+func NewRouter(ctx *Context, next http.Handler) http.Handler {
+ if ctx.router == nil {
+ ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf))
+ }
+
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ if _, rCtx, ok := ctx.RouteInfo(r); ok {
+ next.ServeHTTP(rw, rCtx)
+ return
+ }
+
+ // Not found, check if it exists in the other methods first
+ if others := ctx.AllowedMethods(r); len(others) > 0 {
+ ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+ return
+ }
+
+ ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath()))
+ })
+}
+
+// RoutableAPI represents an interface for things that can serve
+// as a provider of implementations for the swagger router
+type RoutableAPI interface {
+ HandlerFor(string, string) (http.Handler, bool)
+ ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error)
+ ConsumersFor([]string) map[string]runtime.Consumer
+ ProducersFor([]string) map[string]runtime.Producer
+ AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator
+ Authorizer() runtime.Authorizer
+ Formats() strfmt.Registry
+ DefaultProduces() string
+ DefaultConsumes() string
+}
+
+// Router represents a swagger-aware router
+type Router interface {
+ Lookup(method, path string) (*MatchedRoute, bool)
+ OtherMethods(method, path string) []string
+}
+
+type defaultRouteBuilder struct {
+ spec *loads.Document
+ analyzer *analysis.Spec
+ api RoutableAPI
+ records map[string][]denco.Record
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
+}
+
+type defaultRouter struct {
+ spec *loads.Document
+ routers map[string]*denco.Router
+ debugLogf func(string, ...any) // a logging function to debug context and all components using it
+}
+
+func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder {
+ var o defaultRouterOpts
+ for _, apply := range opts {
+ apply(&o)
+ }
+ if o.debugLogf == nil {
+ o.debugLogf = debugLogfFunc(nil) // defaults to standard logger
+ }
+
+ return &defaultRouteBuilder{
+ spec: spec,
+ analyzer: analysis.New(spec.Spec()),
+ api: api,
+ records: make(map[string][]denco.Record),
+ debugLogf: o.debugLogf,
+ }
+}
+
+// DefaultRouterOpt allows to inject optional behavior to the default router.
+type DefaultRouterOpt func(*defaultRouterOpts)
+
+type defaultRouterOpts struct {
+ debugLogf func(string, ...any)
+}
+
+// WithDefaultRouterLogger sets the debug logger for the default router.
+//
+// This is enabled only in DEBUG mode.
+func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = debugLogfFunc(lg)
+ }
+}
+
+// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
+func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt {
+ return func(o *defaultRouterOpts) {
+ o.debugLogf = fn
+ }
+}
+
+// DefaultRouter creates a default implementation of the router
+func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router {
+ builder := newDefaultRouteBuilder(spec, api, opts...)
+ if spec != nil {
+ for method, paths := range builder.analyzer.Operations() {
+ for path, operation := range paths {
+ fp := fpath.Join(spec.BasePath(), path)
+ builder.debugLogf("adding route %s %s %q", method, fp, operation.ID)
+ builder.AddRoute(method, fp, operation)
+ }
+ }
+ }
+ return builder.Build()
+}
+
+// RouteAuthenticator is an authenticator that can compose several authenticators together.
+// It also knows when it contains an authenticator that allows for anonymous pass through.
+// Contains a group of 1 or more authenticators that have a logical AND relationship
+type RouteAuthenticator struct {
+ Authenticator map[string]runtime.Authenticator
+ Schemes []string
+ Scopes map[string][]string
+ allScopes []string
+ commonScopes []string
+ allowAnonymous bool
+}
+
+func (ra *RouteAuthenticator) AllowsAnonymous() bool {
+ return ra.allowAnonymous
+}
+
+// AllScopes returns a list of unique scopes that is the combination
+// of all the scopes in the requirements
+func (ra *RouteAuthenticator) AllScopes() []string {
+ return ra.allScopes
+}
+
+// CommonScopes returns a list of unique scopes that are common in all the
+// scopes in the requirements
+func (ra *RouteAuthenticator) CommonScopes() []string {
+ return ra.commonScopes
+}
+
+// Authenticate Authenticator interface implementation
+func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
+ if ra.allowAnonymous {
+ route.Authenticator = ra
+ return true, nil, nil
+ }
+ // iterate in proper order
+ var lastResult interface{}
+ for _, scheme := range ra.Schemes {
+ if authenticator, ok := ra.Authenticator[scheme]; ok {
+ applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{
+ Request: req,
+ RequiredScopes: ra.Scopes[scheme],
+ })
+ if !applies {
+ return false, nil, nil
+ }
+ if err != nil {
+ route.Authenticator = ra
+ return true, nil, err
+ }
+ lastResult = princ
+ }
+ }
+ route.Authenticator = ra
+ return true, lastResult, nil
+}
+
+func stringSliceUnion(slices ...[]string) []string {
+ unique := make(map[string]struct{})
+ var result []string
+ for _, slice := range slices {
+ for _, entry := range slice {
+ if _, ok := unique[entry]; ok {
+ continue
+ }
+ unique[entry] = struct{}{}
+ result = append(result, entry)
+ }
+ }
+ return result
+}
+
+func stringSliceIntersection(slices ...[]string) []string {
+ unique := make(map[string]int)
+ var intersection []string
+
+ total := len(slices)
+ var emptyCnt int
+ for _, slice := range slices {
+ if len(slice) == 0 {
+ emptyCnt++
+ continue
+ }
+
+ for _, entry := range slice {
+ unique[entry]++
+ if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices
+ intersection = append(intersection, entry)
+ }
+ }
+ }
+
+ return intersection
+}
+
+// RouteAuthenticators represents a group of authenticators that represent a logical OR
+type RouteAuthenticators []RouteAuthenticator
+
+// AllowsAnonymous returns true when there is an authenticator that means optional auth
+func (ras RouteAuthenticators) AllowsAnonymous() bool {
+ for _, ra := range ras {
+ if ra.AllowsAnonymous() {
+ return true
+ }
+ }
+ return false
+}
+
+// Authenticate method implementation so this collection can be used as an authenticator.
+func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
+ var lastError error
+ var allowsAnon bool
+ var anonAuth RouteAuthenticator
+
+ for _, ra := range ras {
+ if ra.AllowsAnonymous() {
+ anonAuth = ra
+ allowsAnon = true
+ continue
+ }
+ applies, usr, err := ra.Authenticate(req, route)
+ if !applies || err != nil || usr == nil {
+ if err != nil {
+ lastError = err
+ }
+ continue
+ }
+ return applies, usr, nil
+ }
+
+ if allowsAnon && lastError == nil {
+ route.Authenticator = &anonAuth
+ return true, nil, lastError
+ }
+ return lastError != nil, nil, lastError
+}
+
+type routeEntry struct {
+ PathPattern string
+ BasePath string
+ Operation *spec.Operation
+ Consumes []string
+ Consumers map[string]runtime.Consumer
+ Produces []string
+ Producers map[string]runtime.Producer
+ Parameters map[string]spec.Parameter
+ Handler http.Handler
+ Formats strfmt.Registry
+ Binder *UntypedRequestBinder
+ Authenticators RouteAuthenticators
+ Authorizer runtime.Authorizer
+}
+
+// MatchedRoute represents the route that was matched in this request
+type MatchedRoute struct {
+ routeEntry
+ Params RouteParams
+ Consumer runtime.Consumer
+ Producer runtime.Producer
+ Authenticator *RouteAuthenticator
+}
+
+// HasAuth returns true when the route has a security requirement defined
+func (m *MatchedRoute) HasAuth() bool {
+ return len(m.Authenticators) > 0
+}
+
+// NeedsAuth returns true when the request still
+// needs to perform authentication
+func (m *MatchedRoute) NeedsAuth() bool {
+ return m.HasAuth() && m.Authenticator == nil
+}
+
+func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
+ mth := strings.ToUpper(method)
+ d.debugLogf("looking up route for %s %s", method, path)
+ if Debug {
+ if len(d.routers) == 0 {
+ d.debugLogf("there are no known routers")
+ }
+ for meth := range d.routers {
+ d.debugLogf("got a router for %s", meth)
+ }
+ }
+ if router, ok := d.routers[mth]; ok {
+ if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil {
+ if entry, ok := m.(*routeEntry); ok {
+ d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
+ var params RouteParams
+ for _, p := range rp {
+ v, err := url.PathUnescape(p.Value)
+ if err != nil {
+ d.debugLogf("failed to escape %q: %v", p.Value, err)
+ v = p.Value
+ }
+ // a workaround to handle fragment/composing parameters until they are supported in denco router
+ // check if this parameter is a fragment within a path segment
+ if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' {
+ // extract fragment parameters
+ ep := strings.Split(entry.PathPattern[xpos:], "/")[0]
+ pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil)
+ for i, pname := range pnames {
+ params = append(params, RouteParam{Name: pname, Value: pvalues[i]})
+ }
+ } else {
+ // use the parameter directly
+ params = append(params, RouteParam{Name: p.Name, Value: v})
+ }
+ }
+ return &MatchedRoute{routeEntry: *entry, Params: params}, true
+ }
+ } else {
+ d.debugLogf("couldn't find a route by path for %s %s", method, path)
+ }
+ } else {
+ d.debugLogf("couldn't find a route by method for %s %s", method, path)
+ }
+ return nil, false
+}
+
+func (d *defaultRouter) OtherMethods(method, path string) []string {
+ mn := strings.ToUpper(method)
+ var methods []string
+ for k, v := range d.routers {
+ if k != mn {
+ if _, _, ok := v.Lookup(fpath.Clean(path)); ok {
+ methods = append(methods, k)
+ continue
+ }
+ }
+ }
+ return methods
+}
+
+func (d *defaultRouter) SetLogger(lg logger.Logger) {
+ d.debugLogf = debugLogfFunc(lg)
+}
+
+// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco
+var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)
+
+func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) {
+ pleft := strings.Index(pattern, "{")
+ names = append(names, name)
+ if pleft < 0 {
+ if strings.HasSuffix(value, pattern) {
+ values = append(values, value[:len(value)-len(pattern)])
+ } else {
+ values = append(values, "")
+ }
+ } else {
+ toskip := pattern[:pleft]
+ pright := strings.Index(pattern, "}")
+ vright := strings.Index(value, toskip)
+ if vright >= 0 {
+ values = append(values, value[:vright])
+ } else {
+ values = append(values, "")
+ value = ""
+ }
+ return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values)
+ }
+ return names, values
+}
+
+func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) {
+ mn := strings.ToUpper(method)
+
+ bp := fpath.Clean(d.spec.BasePath())
+ if len(bp) > 0 && bp[len(bp)-1] == '/' {
+ bp = bp[:len(bp)-1]
+ }
+
+ d.debugLogf("operation: %#v", *operation)
+ if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok {
+ consumes := d.analyzer.ConsumesFor(operation)
+ produces := d.analyzer.ProducesFor(operation)
+ parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp))
+
+ // add API defaults if not part of the spec
+ if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) {
+ consumes = append(consumes, defConsumes)
+ }
+
+ if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) {
+ produces = append(produces, defProduces)
+ }
+
+ requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats())
+ requestBinder.setDebugLogf(d.debugLogf)
+ record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{
+ BasePath: bp,
+ PathPattern: path,
+ Operation: operation,
+ Handler: handler,
+ Consumes: consumes,
+ Produces: produces,
+ Consumers: d.api.ConsumersFor(normalizeOffers(consumes)),
+ Producers: d.api.ProducersFor(normalizeOffers(produces)),
+ Parameters: parameters,
+ Formats: d.api.Formats(),
+ Binder: requestBinder,
+ Authenticators: d.buildAuthenticators(operation),
+ Authorizer: d.api.Authorizer(),
+ })
+ d.records[mn] = append(d.records[mn], record)
+ }
+}
+
+func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators {
+ requirements := d.analyzer.SecurityRequirementsFor(operation)
+ auths := make([]RouteAuthenticator, 0, len(requirements))
+ for _, reqs := range requirements {
+ schemes := make([]string, 0, len(reqs))
+ scopes := make(map[string][]string, len(reqs))
+ scopeSlices := make([][]string, 0, len(reqs))
+ for _, req := range reqs {
+ schemes = append(schemes, req.Name)
+ scopes[req.Name] = req.Scopes
+ scopeSlices = append(scopeSlices, req.Scopes)
+ }
+
+ definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs)
+ authenticators := d.api.AuthenticatorsFor(definitions)
+ auths = append(auths, RouteAuthenticator{
+ Authenticator: authenticators,
+ Schemes: schemes,
+ Scopes: scopes,
+ allScopes: stringSliceUnion(scopeSlices...),
+ commonScopes: stringSliceIntersection(scopeSlices...),
+ allowAnonymous: len(reqs) == 1 && reqs[0].Name == "",
+ })
+ }
+ return auths
+}
+
+func (d *defaultRouteBuilder) Build() *defaultRouter {
+ routers := make(map[string]*denco.Router)
+ for method, records := range d.records {
+ router := denco.New()
+ _ = router.Build(records)
+ routers[method] = router
+ }
+ return &defaultRouter{
+ spec: d.spec,
+ routers: routers,
+ debugLogf: d.debugLogf,
+ }
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go
new file mode 100644
index 00000000..2b061cae
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/security.go
@@ -0,0 +1,39 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import "net/http"
+
+func newSecureAPI(ctx *Context, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ route, rCtx, _ := ctx.RouteInfo(r)
+ if rCtx != nil {
+ r = rCtx
+ }
+ if route != nil && !route.NeedsAuth() {
+ next.ServeHTTP(rw, r)
+ return
+ }
+
+ _, rCtx, err := ctx.Authorize(r, route)
+ if err != nil {
+ ctx.Respond(rw, r, route.Produces, route, err)
+ return
+ }
+ r = rCtx
+
+ next.ServeHTTP(rw, r)
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go
new file mode 100644
index 00000000..87e17e34
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go
@@ -0,0 +1,102 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "net/http"
+ "path"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ applicationJSON = "application/json"
+)
+
+// SpecOption can be applied to the Spec serving middleware
+type SpecOption func(*specOptions)
+
+var defaultSpecOptions = specOptions{
+ Path: "",
+ Document: "swagger.json",
+}
+
+type specOptions struct {
+ Path string
+ Document string
+}
+
+func specOptionsWithDefaults(opts []SpecOption) specOptions {
+ o := defaultSpecOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// Spec creates a middleware to serve a swagger spec as a JSON document.
+//
+// This allows for altering the spec before starting the http listener.
+//
+// The basePath argument indicates the path of the spec document (defaults to "/").
+// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json").
+func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler {
+ if basePath == "" {
+ basePath = "/"
+ }
+ o := specOptionsWithDefaults(opts)
+ pth := path.Join(basePath, o.Path, o.Document)
+
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, applicationJSON)
+ rw.WriteHeader(http.StatusOK)
+ _, _ = rw.Write(b)
+
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
+ return
+ }
+
+ rw.Header().Set(contentTypeHeader, applicationJSON)
+ rw.WriteHeader(http.StatusNotFound)
+ })
+}
+
+// WithSpecPath sets the path to be joined to the base path of the Spec middleware.
+//
+// This is empty by default.
+func WithSpecPath(pth string) SpecOption {
+ return func(o *specOptions) {
+ o.Path = pth
+ }
+}
+
+// WithSpecDocument sets the name of the JSON document served as a spec.
+//
+// By default, this is "swagger.json"
+func WithSpecDocument(doc string) SpecOption {
+ return func(o *specOptions) {
+ if doc == "" {
+ return
+ }
+
+ o.Document = doc
+ }
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
new file mode 100644
index 00000000..ec3c10cb
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go
@@ -0,0 +1,175 @@
+package middleware
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "path"
+)
+
+// SwaggerUIOpts configures the SwaggerUI middleware
+type SwaggerUIOpts struct {
+ // BasePath for the API, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+
+ // OAuthCallbackURL the url called after OAuth2 login
+ OAuthCallbackURL string
+
+ // The three components needed to embed swagger-ui
+
+ // SwaggerURL points to the js that generates the SwaggerUI site.
+ //
+ // Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js
+ SwaggerURL string
+
+ SwaggerPresetURL string
+ SwaggerStylesURL string
+
+ Favicon32 string
+ Favicon16 string
+}
+
+// EnsureDefaults in case some options are missing
+func (r *SwaggerUIOpts) EnsureDefaults() {
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggeruiTemplate
+ }
+}
+
+func (r *SwaggerUIOpts) EnsureDefaultsOauth2() {
+ r.ensureDefaults()
+
+ if r.Template == "" {
+ r.Template = swaggerOAuthTemplate
+ }
+}
+
+func (r *SwaggerUIOpts) ensureDefaults() {
+ common := toCommonUIOptions(r)
+ common.EnsureDefaults()
+ fromCommonToAnyOptions(common, r)
+
+ // swaggerui-specifics
+ if r.OAuthCallbackURL == "" {
+ r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
+ }
+ if r.SwaggerURL == "" {
+ r.SwaggerURL = swaggerLatest
+ }
+ if r.SwaggerPresetURL == "" {
+ r.SwaggerPresetURL = swaggerPresetLatest
+ }
+ if r.SwaggerStylesURL == "" {
+ r.SwaggerStylesURL = swaggerStylesLatest
+ }
+ if r.Favicon16 == "" {
+ r.Favicon16 = swaggerFavicon16Latest
+ }
+ if r.Favicon32 == "" {
+ r.Favicon32 = swaggerFavicon32Latest
+ }
+}
+
+// SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
+//
+// This allows for altering the spec before starting the http listener.
+func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler {
+ opts.EnsureDefaults()
+
+ pth := path.Join(opts.BasePath, opts.Path)
+ tmpl := template.Must(template.New("swaggerui").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
+
+ return serveUI(pth, assets.Bytes(), next)
+}
+
+const (
+ swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js"
+ swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js"
+ swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css"
+ swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png"
+ swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png"
+ swaggeruiTemplate = `
+
+
+
+
+ {{ .Title }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
new file mode 100644
index 00000000..e81212f7
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go
@@ -0,0 +1,105 @@
+package middleware
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "text/template"
+)
+
+func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler {
+ opts.EnsureDefaultsOauth2()
+
+ pth := opts.OAuthCallbackURL
+ tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template))
+ assets := bytes.NewBuffer(nil)
+ if err := tmpl.Execute(assets, opts); err != nil {
+ panic(fmt.Errorf("cannot execute template: %w", err))
+ }
+
+ return serveUI(pth, assets.Bytes(), next)
+}
+
+const (
+ swaggerOAuthTemplate = `
+
+
+
+ {{ .Title }}
+
+
+
+
+
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/ui_options.go b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
new file mode 100644
index 00000000..b86efa00
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/ui_options.go
@@ -0,0 +1,173 @@
+package middleware
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+)
+
+const (
+ // constants that are common to all UI-serving middlewares
+ defaultDocsPath = "docs"
+ defaultDocsURL = "/swagger.json"
+ defaultDocsTitle = "API Documentation"
+)
+
+// uiOptions defines common options for UI serving middlewares.
+type uiOptions struct {
+ // BasePath for the UI, defaults to: /
+ BasePath string
+
+ // Path combines with BasePath to construct the path to the UI, defaults to: "docs".
+ Path string
+
+ // SpecURL is the URL of the spec document.
+ //
+ // Defaults to: /swagger.json
+ SpecURL string
+
+ // Title for the documentation site, default to: API documentation
+ Title string
+
+ // Template specifies a custom template to serve the UI
+ Template string
+}
+
+// toCommonUIOptions converts any UI option type to retain the common options.
+//
+// This uses gob encoding/decoding to convert common fields from one struct to another.
+func toCommonUIOptions(opts interface{}) uiOptions {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ var o uiOptions
+ err := enc.Encode(opts)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(&o)
+ if err != nil {
+ panic(err)
+ }
+
+ return o
+}
+
+func fromCommonToAnyOptions[T any](source uiOptions, target *T) {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ dec := gob.NewDecoder(&buf)
+ err := enc.Encode(source)
+ if err != nil {
+ panic(err)
+ }
+
+ err = dec.Decode(target)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// UIOption can be applied to UI serving middleware, such as Context.APIHandler or
+// Context.APIHandlerSwaggerUI to alter the default behavior.
+type UIOption func(*uiOptions)
+
+func uiOptionsWithDefaults(opts []UIOption) uiOptions {
+ var o uiOptions
+ for _, apply := range opts {
+ apply(&o)
+ }
+
+ return o
+}
+
+// WithUIBasePath sets the base path from where to serve the UI assets.
+//
+// By default, Context middleware sets this value to the API base path.
+func WithUIBasePath(base string) UIOption {
+ return func(o *uiOptions) {
+ if !strings.HasPrefix(base, "/") {
+ base = "/" + base
+ }
+ o.BasePath = base
+ }
+}
+
+// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}).
+func WithUIPath(pth string) UIOption {
+ return func(o *uiOptions) {
+ o.Path = pth
+ }
+}
+
+// WithUISpecURL sets the path from where to serve swagger spec document.
+//
+// This may be specified as a full URL or a path.
+//
+// By default, this is "/swagger.json"
+func WithUISpecURL(specURL string) UIOption {
+ return func(o *uiOptions) {
+ o.SpecURL = specURL
+ }
+}
+
+// WithUITitle sets the title of the UI.
+//
+// By default, Context middleware sets this value to the title found in the API spec.
+func WithUITitle(title string) UIOption {
+ return func(o *uiOptions) {
+ o.Title = title
+ }
+}
+
+// WithTemplate allows to set a custom template for the UI.
+//
+// UI middleware will panic if the template does not parse or execute properly.
+func WithTemplate(tpl string) UIOption {
+ return func(o *uiOptions) {
+ o.Template = tpl
+ }
+}
+
+// EnsureDefaults in case some options are missing
+func (r *uiOptions) EnsureDefaults() {
+ if r.BasePath == "" {
+ r.BasePath = "/"
+ }
+ if r.Path == "" {
+ r.Path = defaultDocsPath
+ }
+ if r.SpecURL == "" {
+ r.SpecURL = defaultDocsURL
+ }
+ if r.Title == "" {
+ r.Title = defaultDocsTitle
+ }
+}
+
+// serveUI creates a middleware that serves a templated asset as text/html.
+func serveUI(pth string, assets []byte, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ if path.Clean(r.URL.Path) == pth {
+ rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8")
+ rw.WriteHeader(http.StatusOK)
+ _, _ = rw.Write(assets)
+
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(rw, r)
+
+ return
+ }
+
+ rw.Header().Set(contentTypeHeader, "text/plain")
+ rw.WriteHeader(http.StatusNotFound)
+ _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
new file mode 100644
index 00000000..7b7269bd
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go
@@ -0,0 +1,287 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package untyped
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/loads"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/go-openapi/runtime"
+)
+
+// NewAPI creates the default untyped API
+func NewAPI(spec *loads.Document) *API {
+ var an *analysis.Spec
+ if spec != nil && spec.Spec() != nil {
+ an = analysis.New(spec.Spec())
+ }
+ api := &API{
+ spec: spec,
+ analyzer: an,
+ consumers: make(map[string]runtime.Consumer, 10),
+ producers: make(map[string]runtime.Producer, 10),
+ authenticators: make(map[string]runtime.Authenticator),
+ operations: make(map[string]map[string]runtime.OperationHandler),
+ ServeError: errors.ServeError,
+ Models: make(map[string]func() interface{}),
+ formats: strfmt.NewFormats(),
+ }
+ return api.WithJSONDefaults()
+}
+
+// API represents an untyped mux for a swagger spec
+type API struct {
+ spec *loads.Document
+ analyzer *analysis.Spec
+ DefaultProduces string
+ DefaultConsumes string
+ consumers map[string]runtime.Consumer
+ producers map[string]runtime.Producer
+ authenticators map[string]runtime.Authenticator
+ authorizer runtime.Authorizer
+ operations map[string]map[string]runtime.OperationHandler
+ ServeError func(http.ResponseWriter, *http.Request, error)
+ Models map[string]func() interface{}
+ formats strfmt.Registry
+}
+
+// WithJSONDefaults loads the json defaults for this api
+func (d *API) WithJSONDefaults() *API {
+ d.DefaultConsumes = runtime.JSONMime
+ d.DefaultProduces = runtime.JSONMime
+ d.consumers[runtime.JSONMime] = runtime.JSONConsumer()
+ d.producers[runtime.JSONMime] = runtime.JSONProducer()
+ return d
+}
+
+// WithoutJSONDefaults clears the json defaults for this api
+func (d *API) WithoutJSONDefaults() *API {
+ d.DefaultConsumes = ""
+ d.DefaultProduces = ""
+ delete(d.consumers, runtime.JSONMime)
+ delete(d.producers, runtime.JSONMime)
+ return d
+}
+
+// Formats returns the registered string formats
+func (d *API) Formats() strfmt.Registry {
+ if d.formats == nil {
+ d.formats = strfmt.NewFormats()
+ }
+ return d.formats
+}
+
+// RegisterFormat registers a custom format validator
+func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) {
+ if d.formats == nil {
+ d.formats = strfmt.NewFormats()
+ }
+ d.formats.Add(name, format, validator)
+}
+
+// RegisterAuth registers an auth handler in this api
+func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) {
+ if d.authenticators == nil {
+ d.authenticators = make(map[string]runtime.Authenticator)
+ }
+ d.authenticators[scheme] = handler
+}
+
+// RegisterAuthorizer registers an authorizer handler in this api
+func (d *API) RegisterAuthorizer(handler runtime.Authorizer) {
+ d.authorizer = handler
+}
+
+// RegisterConsumer registers a consumer for a media type.
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) {
+ if d.consumers == nil {
+ d.consumers = make(map[string]runtime.Consumer, 10)
+ }
+ d.consumers[strings.ToLower(mediaType)] = handler
+}
+
+// RegisterProducer registers a producer for a media type
+func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) {
+ if d.producers == nil {
+ d.producers = make(map[string]runtime.Producer, 10)
+ }
+ d.producers[strings.ToLower(mediaType)] = handler
+}
+
+// RegisterOperation registers an operation handler for an operation name
+func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) {
+ if d.operations == nil {
+ d.operations = make(map[string]map[string]runtime.OperationHandler, 30)
+ }
+ um := strings.ToUpper(method)
+ if b, ok := d.operations[um]; !ok || b == nil {
+ d.operations[um] = make(map[string]runtime.OperationHandler)
+ }
+ d.operations[um][path] = handler
+}
+
+// OperationHandlerFor returns the operation handler for the specified id if it can be found
+func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) {
+ if d.operations == nil {
+ return nil, false
+ }
+ if pi, ok := d.operations[strings.ToUpper(method)]; ok {
+ h, ok := pi[path]
+ return h, ok
+ }
+ return nil, false
+}
+
+// ConsumersFor gets the consumers for the specified media types
+func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
+ result := make(map[string]runtime.Consumer)
+ for _, mt := range mediaTypes {
+ if consumer, ok := d.consumers[mt]; ok {
+ result[mt] = consumer
+ }
+ }
+ return result
+}
+
+// ProducersFor gets the producers for the specified media types
+func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer {
+ result := make(map[string]runtime.Producer)
+ for _, mt := range mediaTypes {
+ if producer, ok := d.producers[mt]; ok {
+ result[mt] = producer
+ }
+ }
+ return result
+}
+
+// AuthenticatorsFor gets the authenticators for the specified security schemes
+func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator {
+ result := make(map[string]runtime.Authenticator)
+ for k := range schemes {
+ if a, ok := d.authenticators[k]; ok {
+ result[k] = a
+ }
+ }
+ return result
+}
+
+// Authorizer returns the registered authorizer
+func (d *API) Authorizer() runtime.Authorizer {
+ return d.authorizer
+}
+
+// Validate validates this API for any missing items
+func (d *API) Validate() error {
+ return d.validate()
+}
+
+// validate checks the registrations in this API against the spec analyzer
+func (d *API) validate() error {
+ consumes := make([]string, 0, len(d.consumers))
+ for k := range d.consumers {
+ consumes = append(consumes, k)
+ }
+
+ produces := make([]string, 0, len(d.producers))
+ for k := range d.producers {
+ produces = append(produces, k)
+ }
+
+ authenticators := make([]string, 0, len(d.authenticators))
+ for k := range d.authenticators {
+ authenticators = append(authenticators, k)
+ }
+
+ operations := make([]string, 0, len(d.operations))
+ for m, v := range d.operations {
+ for p := range v {
+ operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p))
+ }
+ }
+
+ secDefinitions := d.spec.Spec().SecurityDefinitions
+ definedAuths := make([]string, 0, len(secDefinitions))
+ for k := range secDefinitions {
+ definedAuths = append(definedAuths, k)
+ }
+
+ if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil {
+ return err
+ }
+ if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil {
+ return err
+ }
+ if err := d.verify("operation", operations, d.analyzer.OperationMethodPaths()); err != nil {
+ return err
+ }
+
+ requiredAuths := d.analyzer.RequiredSecuritySchemes()
+ if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil {
+ return err
+ }
+ if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (d *API) verify(name string, registrations []string, expectations []string) error {
+ sort.Strings(registrations)
+ sort.Strings(expectations)
+
+ expected := map[string]struct{}{}
+ seen := map[string]struct{}{}
+
+ for _, v := range expectations {
+ expected[v] = struct{}{}
+ }
+
+ var unspecified []string
+ for _, v := range registrations {
+ seen[v] = struct{}{}
+ if _, ok := expected[v]; !ok {
+ unspecified = append(unspecified, v)
+ }
+ }
+
+ for k := range seen {
+ delete(expected, k)
+ }
+
+ unregistered := make([]string, 0, len(expected))
+ for k := range expected {
+ unregistered = append(unregistered, k)
+ }
+ sort.Strings(unspecified)
+ sort.Strings(unregistered)
+
+ if len(unregistered) > 0 || len(unspecified) > 0 {
+ return &errors.APIVerificationFailed{
+ Section: name,
+ MissingSpecification: unspecified,
+ MissingRegistration: unregistered,
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go
new file mode 100644
index 00000000..0a5356c6
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go
@@ -0,0 +1,130 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+ "mime"
+ "net/http"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/swag"
+
+ "github.com/go-openapi/runtime"
+)
+
+type validation struct {
+ context *Context
+ result []error
+ request *http.Request
+ route *MatchedRoute
+ bound map[string]interface{}
+}
+
+// validateContentType validates the content type of a request
+func validateContentType(allowed []string, actual string) error {
+ if len(allowed) == 0 {
+ return nil
+ }
+ mt, _, err := mime.ParseMediaType(actual)
+ if err != nil {
+ return errors.InvalidContentType(actual, allowed)
+ }
+ if swag.ContainsStringsCI(allowed, mt) {
+ return nil
+ }
+ if swag.ContainsStringsCI(allowed, "*/*") {
+ return nil
+ }
+ parts := strings.Split(actual, "/")
+ if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") {
+ return nil
+ }
+ return errors.InvalidContentType(actual, allowed)
+}
+
+func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
+ validate := &validation{
+ context: ctx,
+ request: request,
+ route: route,
+ bound: make(map[string]interface{}),
+ }
+ validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath())
+
+ validate.contentType()
+ if len(validate.result) == 0 {
+ validate.responseFormat()
+ }
+ if len(validate.result) == 0 {
+ validate.parameters()
+ }
+
+ return validate
+}
+
+func (v *validation) debugLogf(format string, args ...any) {
+ v.context.debugLogf(format, args...)
+}
+
+func (v *validation) parameters() {
+ v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
+ if result.Error() == "validation failure list" {
+ for _, e := range result.(*errors.Validation).Value.([]interface{}) {
+ v.result = append(v.result, e.(error))
+ }
+ return
+ }
+ v.result = append(v.result, result)
+ }
+}
+
+func (v *validation) contentType() {
+ if len(v.result) == 0 && runtime.HasBody(v.request) {
+ v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
+ ct, _, req, err := v.context.ContentType(v.request)
+ if err != nil {
+ v.result = append(v.result, err)
+ } else {
+ v.request = req
+ }
+
+ if len(v.result) == 0 {
+ v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", "))
+ if err := validateContentType(v.route.Consumes, ct); err != nil {
+ v.result = append(v.result, err)
+ }
+ }
+ if ct != "" && v.route.Consumer == nil {
+ cons, ok := v.route.Consumers[ct]
+ if !ok {
+ v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct))
+ } else {
+ v.route.Consumer = cons
+ }
+ }
+ }
+}
+
+func (v *validation) responseFormat() {
+// if the route provides values for Produces and no format could be identified then return an error.
+ // if the route does not specify values for Produces then treat request as valid since the API designer
+// chose not to specify the format for responses.
+ if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 {
+ v.request = rCtx
+ v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces))
+ }
+}
diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go
new file mode 100644
index 00000000..9e3e1ecb
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/request.go
@@ -0,0 +1,149 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// CanHaveBody returns true if this method can have a body
+func CanHaveBody(method string) bool {
+ mn := strings.ToUpper(method)
+ return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE"
+}
+
+// IsSafe returns true if this is a request with a safe method
+func IsSafe(r *http.Request) bool {
+ mn := strings.ToUpper(r.Method)
+ return mn == "GET" || mn == "HEAD"
+}
+
+// AllowsBody returns true if the request allows for a body
+func AllowsBody(r *http.Request) bool {
+ mn := strings.ToUpper(r.Method)
+ return mn != "HEAD"
+}
+
+// HasBody returns true if this request has a body to read
+func HasBody(r *http.Request) bool {
+ // happy case: we have a content length set
+ if r.ContentLength > 0 {
+ return true
+ }
+
+ if r.Header.Get("content-length") != "" {
+ // in this case, no Transfer-Encoding should be present
+ // we have a header set but it was explicitly set to 0, so we assume no body
+ return false
+ }
+
+ rdr := newPeekingReader(r.Body)
+ r.Body = rdr
+ return rdr.HasContent()
+}
+
+func newPeekingReader(r io.ReadCloser) *peekingReader {
+ if r == nil {
+ return nil
+ }
+ return &peekingReader{
+ underlying: bufio.NewReader(r),
+ orig: r,
+ }
+}
+
+type peekingReader struct {
+ underlying interface {
+ Buffered() int
+ Peek(int) ([]byte, error)
+ Read([]byte) (int, error)
+ }
+ orig io.ReadCloser
+}
+
+func (p *peekingReader) HasContent() bool {
+ if p == nil {
+ return false
+ }
+ if p.underlying.Buffered() > 0 {
+ return true
+ }
+ b, err := p.underlying.Peek(1)
+ if err != nil {
+ return false
+ }
+ return len(b) > 0
+}
+
+func (p *peekingReader) Read(d []byte) (int, error) {
+ if p == nil {
+ return 0, io.EOF
+ }
+ if p.underlying == nil {
+ return 0, io.ErrUnexpectedEOF
+ }
+ return p.underlying.Read(d)
+}
+
+func (p *peekingReader) Close() error {
+ if p.underlying == nil {
+ return errors.New("reader already closed")
+ }
+ p.underlying = nil
+ if p.orig != nil {
+ return p.orig.Close()
+ }
+ return nil
+}
+
+// JSONRequest creates a new http request with json headers set.
+//
+// It uses context.Background.
+func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add(HeaderContentType, JSONMime)
+ req.Header.Add(HeaderAccept, JSONMime)
+ return req, nil
+}
+
+// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool)
+type Gettable interface {
+ GetOK(string) ([]string, bool, bool)
+}
+
+// ReadSingleValue reads a single value from the source
+func ReadSingleValue(values Gettable, name string) string {
+ vv, _, hv := values.GetOK(name)
+ if hv {
+ return vv[len(vv)-1]
+ }
+ return ""
+}
+
+// ReadCollectionValue reads a collection value from a string data source
+func ReadCollectionValue(values Gettable, name, collectionFormat string) []string {
+ v := ReadSingleValue(values, name)
+ return swag.SplitByFormat(v, collectionFormat)
+}
diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go
new file mode 100644
index 00000000..bb30472b
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go
@@ -0,0 +1,277 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package security
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/go-openapi/errors"
+
+ "github.com/go-openapi/runtime"
+)
+
+const (
+ query = "query"
+ header = "header"
+ accessTokenParam = "access_token"
+)
+
+// HttpAuthenticator is a function that authenticates a HTTP request
+func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { //nolint:revive,stylecheck
+ return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
+ if request, ok := params.(*http.Request); ok {
+ return handler(request)
+ }
+ if scoped, ok := params.(*ScopedAuthRequest); ok {
+ return handler(scoped.Request)
+ }
+ return false, nil, nil
+ })
+}
+
+// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes
+func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator {
+ return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
+ if request, ok := params.(*ScopedAuthRequest); ok {
+ return handler(request)
+ }
+ return false, nil, nil
+ })
+}
+
+// UserPassAuthentication authentication function
+type UserPassAuthentication func(string, string) (interface{}, error)
+
+// UserPassAuthenticationCtx authentication function with context.Context
+type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error)
+
+// TokenAuthentication authentication function
+type TokenAuthentication func(string) (interface{}, error)
+
+// TokenAuthenticationCtx authentication function with context.Context
+type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error)
+
+// ScopedTokenAuthentication authentication function
+type ScopedTokenAuthentication func(string, []string) (interface{}, error)
+
+// ScopedTokenAuthenticationCtx authentication function with context.Context
+type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error)
+
+var DefaultRealmName = "API"
+
+type secCtxKey uint8
+
+const (
+ failedBasicAuth secCtxKey = iota
+ oauth2SchemeName
+)
+
+func FailedBasicAuth(r *http.Request) string {
+ return FailedBasicAuthCtx(r.Context())
+}
+
+func FailedBasicAuthCtx(ctx context.Context) string {
+ v, ok := ctx.Value(failedBasicAuth).(string)
+ if !ok {
+ return ""
+ }
+ return v
+}
+
+func OAuth2SchemeName(r *http.Request) string {
+ return OAuth2SchemeNameCtx(r.Context())
+}
+
+func OAuth2SchemeNameCtx(ctx context.Context) string {
+ v, ok := ctx.Value(oauth2SchemeName).(string)
+ if !ok {
+ return ""
+ }
+ return v
+}
+
+// BasicAuth creates a basic auth authenticator with the provided authentication function
+func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator {
+ return BasicAuthRealm(DefaultRealmName, authenticate)
+}
+
+// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name
+func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator {
+ if realm == "" {
+ realm = DefaultRealmName
+ }
+
+ return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
+ if usr, pass, ok := r.BasicAuth(); ok {
+ p, err := authenticate(usr, pass)
+ if err != nil {
+ *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
+ }
+ return true, p, err
+ }
+ *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
+ return false, nil, nil
+ })
+}
+
+// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context
+func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator {
+ return BasicAuthRealmCtx(DefaultRealmName, authenticate)
+}
+
+// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context
+func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator {
+ if realm == "" {
+ realm = DefaultRealmName
+ }
+
+ return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
+ if usr, pass, ok := r.BasicAuth(); ok {
+ ctx, p, err := authenticate(r.Context(), usr, pass)
+ if err != nil {
+ ctx = context.WithValue(ctx, failedBasicAuth, realm)
+ }
+ *r = *r.WithContext(ctx)
+ return true, p, err
+ }
+ *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
+ return false, nil, nil
+ })
+}
+
+// APIKeyAuth creates an authenticator that uses a token for authorization.
+// This token can be obtained from either a header or a query string
+func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator {
+ inl := strings.ToLower(in)
+ if inl != query && inl != header {
+ // panic because this is most likely a typo
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
+ }
+
+ var getToken func(*http.Request) string
+ switch inl {
+ case header:
+ getToken = func(r *http.Request) string { return r.Header.Get(name) }
+ case query:
+ getToken = func(r *http.Request) string { return r.URL.Query().Get(name) }
+ }
+
+ return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
+ token := getToken(r)
+ if token == "" {
+ return false, nil, nil
+ }
+
+ p, err := authenticate(token)
+ return true, p, err
+ })
+}
+
+// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context.
+// This token can be obtained from either a header or a query string
+func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator {
+ inl := strings.ToLower(in)
+ if inl != query && inl != header {
+ // panic because this is most likely a typo
+ panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\""))
+ }
+
+ var getToken func(*http.Request) string
+ switch inl {
+ case header:
+ getToken = func(r *http.Request) string { return r.Header.Get(name) }
+ case query:
+ getToken = func(r *http.Request) string { return r.URL.Query().Get(name) }
+ }
+
+ return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
+ token := getToken(r)
+ if token == "" {
+ return false, nil, nil
+ }
+
+ ctx, p, err := authenticate(r.Context(), token)
+ *r = *r.WithContext(ctx)
+ return true, p, err
+ })
+}
+
+// ScopedAuthRequest contains both a http request and the required scopes for a particular operation
+type ScopedAuthRequest struct {
+ Request *http.Request
+ RequiredScopes []string
+}
+
+// BearerAuth for use with oauth2 flows
+func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator {
+ const prefix = "Bearer "
+ return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) {
+ var token string
+ hdr := r.Request.Header.Get(runtime.HeaderAuthorization)
+ if strings.HasPrefix(hdr, prefix) {
+ token = strings.TrimPrefix(hdr, prefix)
+ }
+ if token == "" {
+ qs := r.Request.URL.Query()
+ token = qs.Get(accessTokenParam)
+ }
+ //#nosec
+ ct, _, _ := runtime.ContentType(r.Request.Header)
+ if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
+ token = r.Request.FormValue(accessTokenParam)
+ }
+
+ if token == "" {
+ return false, nil, nil
+ }
+
+ rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name)
+ *r.Request = *r.Request.WithContext(rctx)
+ p, err := authenticate(token, r.RequiredScopes)
+ return true, p, err
+ })
+}
+
+// BearerAuthCtx for use with oauth2 flows with support for context.Context.
+func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator {
+ const prefix = "Bearer "
+ return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) {
+ var token string
+ hdr := r.Request.Header.Get(runtime.HeaderAuthorization)
+ if strings.HasPrefix(hdr, prefix) {
+ token = strings.TrimPrefix(hdr, prefix)
+ }
+ if token == "" {
+ qs := r.Request.URL.Query()
+ token = qs.Get(accessTokenParam)
+ }
+ //#nosec
+ ct, _, _ := runtime.ContentType(r.Request.Header)
+ if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
+ token = r.Request.FormValue(accessTokenParam)
+ }
+
+ if token == "" {
+ return false, nil, nil
+ }
+
+ rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name)
+ ctx, p, err := authenticate(rctx, token, r.RequiredScopes)
+ *r.Request = *r.Request.WithContext(ctx)
+ return true, p, err
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go
new file mode 100644
index 00000000..00c1a4d6
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go
@@ -0,0 +1,27 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package security
+
+import (
+ "net/http"
+
+ "github.com/go-openapi/runtime"
+)
+
+// Authorized provides a default implementation of the Authorizer interface where all
+// requests are authorized (successful)
+func Authorized() runtime.Authorizer {
+ return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil })
+}
diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go
new file mode 100644
index 00000000..3b011a0b
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/statuses.go
@@ -0,0 +1,90 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+// Statuses lists the most common HTTP status codes to default message
+// taken from https://httpstatuses.com/
+var Statuses = map[int]string{
+ 100: "Continue",
+ 101: "Switching Protocols",
+ 102: "Processing",
+ 103: "Checkpoint",
+ 122: "URI too long",
+ 200: "OK",
+ 201: "Created",
+ 202: "Accepted",
+ 203: "Request Processed",
+ 204: "No Content",
+ 205: "Reset Content",
+ 206: "Partial Content",
+ 207: "Multi-Status",
+ 208: "Already Reported",
+ 226: "IM Used",
+ 300: "Multiple Choices",
+ 301: "Moved Permanently",
+ 302: "Found",
+ 303: "See Other",
+ 304: "Not Modified",
+ 305: "Use Proxy",
+ 306: "Switch Proxy",
+ 307: "Temporary Redirect",
+ 308: "Permanent Redirect",
+ 400: "Bad Request",
+ 401: "Unauthorized",
+ 402: "Payment Required",
+ 403: "Forbidden",
+ 404: "Not Found",
+ 405: "Method Not Allowed",
+ 406: "Not Acceptable",
+ 407: "Proxy Authentication Required",
+ 408: "Request Timeout",
+ 409: "Conflict",
+ 410: "Gone",
+ 411: "Length Required",
+ 412: "Precondition Failed",
+ 413: "Request Entity Too Large",
+ 414: "Request-URI Too Long",
+ 415: "Unsupported Media Type",
+ 416: "Request Range Not Satisfiable",
+ 417: "Expectation Failed",
+ 418: "I'm a teapot",
+ 420: "Enhance Your Calm",
+ 422: "Unprocessable Entity",
+ 423: "Locked",
+ 424: "Failed Dependency",
+ 426: "Upgrade Required",
+ 428: "Precondition Required",
+ 429: "Too Many Requests",
+ 431: "Request Header Fields Too Large",
+ 444: "No Response",
+ 449: "Retry With",
+ 450: "Blocked by Windows Parental Controls",
+ 451: "Wrong Exchange Server",
+ 499: "Client Closed Request",
+ 500: "Internal Server Error",
+ 501: "Not Implemented",
+ 502: "Bad Gateway",
+ 503: "Service Unavailable",
+ 504: "Gateway Timeout",
+ 505: "HTTP Version Not Supported",
+ 506: "Variant Also Negotiates",
+ 507: "Insufficient Storage",
+ 508: "Loop Detected",
+ 509: "Bandwidth Limit Exceeded",
+ 510: "Not Extended",
+ 511: "Network Authentication Required",
+ 598: "Network read timeout error",
+ 599: "Network connect timeout error",
+}
diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go
new file mode 100644
index 00000000..f33320b7
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/text.go
@@ -0,0 +1,116 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/go-openapi/swag"
+)
+
+// TextConsumer creates a new text consumer
+func TextConsumer() Consumer {
+ return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+ if reader == nil {
+ return errors.New("TextConsumer requires a reader") // early exit
+ }
+
+ buf := new(bytes.Buffer)
+ _, err := buf.ReadFrom(reader)
+ if err != nil {
+ return err
+ }
+ b := buf.Bytes()
+
+ // If the buffer is empty, no need to unmarshal it, which causes a panic.
+ if len(b) == 0 {
+ return nil
+ }
+
+ if tu, ok := data.(encoding.TextUnmarshaler); ok {
+ err := tu.UnmarshalText(b)
+ if err != nil {
+ return fmt.Errorf("text consumer: %v", err)
+ }
+
+ return nil
+ }
+
+ t := reflect.TypeOf(data)
+ if data != nil && t.Kind() == reflect.Ptr {
+ v := reflect.Indirect(reflect.ValueOf(data))
+ if t.Elem().Kind() == reflect.String {
+ v.SetString(string(b))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s",
+ data, data, "can be resolved by supporting TextUnmarshaler interface")
+ })
+}
+
+// TextProducer creates a new text producer
+func TextProducer() Producer {
+ return ProducerFunc(func(writer io.Writer, data interface{}) error {
+ if writer == nil {
+ return errors.New("TextProducer requires a writer") // early exit
+ }
+
+ if data == nil {
+ return errors.New("no data given to produce text from")
+ }
+
+ if tm, ok := data.(encoding.TextMarshaler); ok {
+ txt, err := tm.MarshalText()
+ if err != nil {
+ return fmt.Errorf("text producer: %v", err)
+ }
+ _, err = writer.Write(txt)
+ return err
+ }
+
+ if str, ok := data.(error); ok {
+ _, err := writer.Write([]byte(str.Error()))
+ return err
+ }
+
+ if str, ok := data.(fmt.Stringer); ok {
+ _, err := writer.Write([]byte(str.String()))
+ return err
+ }
+
+ v := reflect.Indirect(reflect.ValueOf(data))
+ if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice {
+ b, err := swag.WriteJSON(data)
+ if err != nil {
+ return err
+ }
+ _, err = writer.Write(b)
+ return err
+ }
+ if v.Kind() != reflect.String {
+ return fmt.Errorf("%T is not a supported type by the TextProducer", data)
+ }
+
+ _, err := writer.Write([]byte(v.String()))
+ return err
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go
new file mode 100644
index 00000000..11f5732a
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/values.go
@@ -0,0 +1,19 @@
+package runtime
+
+// Values typically represent parameters on a http request.
+type Values map[string][]string
+
+// GetOK returns the values collection for the given key.
+// When the key is present in the map it will return true for hasKey.
+// When the value is not empty it will return true for hasValue.
+func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) {
+ value, hasKey = v[key]
+ if !hasKey {
+ return
+ }
+ if len(value) == 0 {
+ return
+ }
+ hasValue = true
+ return
+}
diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go
new file mode 100644
index 00000000..821c7393
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/xml.go
@@ -0,0 +1,36 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package runtime
+
+import (
+ "encoding/xml"
+ "io"
+)
+
+// XMLConsumer creates a new XML consumer
+func XMLConsumer() Consumer {
+ return ConsumerFunc(func(reader io.Reader, data interface{}) error {
+ dec := xml.NewDecoder(reader)
+ return dec.Decode(data)
+ })
+}
+
+// XMLProducer creates a new XML producer
+func XMLProducer() Producer {
+ return ProducerFunc(func(writer io.Writer, data interface{}) error {
+ enc := xml.NewEncoder(writer)
+ return enc.Encode(data)
+ })
+}
diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go
new file mode 100644
index 00000000..a1a0a589
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go
@@ -0,0 +1,39 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlpc
+
+import (
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "gopkg.in/yaml.v3"
+)
+
+// YAMLConsumer creates a consumer for yaml data
+func YAMLConsumer() runtime.Consumer {
+ return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error {
+ dec := yaml.NewDecoder(r)
+ return dec.Decode(v)
+ })
+}
+
+// YAMLProducer creates a producer for yaml data
+func YAMLProducer() runtime.Producer {
+ return runtime.ProducerFunc(func(w io.Writer, v interface{}) error {
+ enc := yaml.NewEncoder(w)
+ defer enc.Close()
+ return enc.Encode(v)
+ })
+}
diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore
new file mode 100644
index 00000000..f47cb204
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.gitignore
@@ -0,0 +1 @@
+*.out
diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
new file mode 100644
index 00000000..7fd2810c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/README.md
@@ -0,0 +1,54 @@
+# OpenAPI v2 object model [](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/spec)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/spec)
+[](https://goreportcard.com/report/github.com/go-openapi/spec)
+
+The object model for OpenAPI specification documents.
+
+### FAQ
+
+* What does this do?
+
+> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model
+> 2. It knows how to resolve $ref and expand them to make a single root document
+
+* How does it play with the rest of the go-openapi packages ?
+
+> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger)
+> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations
+> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it
+> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
+>
+> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
+
+* Does the unmarshaling support YAML?
+
+> Not directly. The exposed types know only how to unmarshal from JSON.
+>
+> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
+> github.com/go-openapi/loads
+>
+> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
+>
+> See also https://github.com/go-openapi/spec/issues/164
+
+* How can I validate a spec?
+
+> Validation is provided by [the validate package](http://github.com/go-openapi/validate)
+
+* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
+
+> We found jsonschema compatibility more important: since `id` in jsonschema influences
+> how `$ref` are resolved.
+> This `id` does not conflict with any property named `id`.
+>
+> See also https://github.com/go-openapi/spec/issues/23
diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go
new file mode 100644
index 00000000..122993b4
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/cache.go
@@ -0,0 +1,98 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "sync"
+)
+
+// ResolutionCache a cache for resolving urls
+type ResolutionCache interface {
+ Get(string) (interface{}, bool)
+ Set(string, interface{})
+}
+
+type simpleCache struct {
+ lock sync.RWMutex
+ store map[string]interface{}
+}
+
+func (s *simpleCache) ShallowClone() ResolutionCache {
+ store := make(map[string]interface{}, len(s.store))
+ s.lock.RLock()
+ for k, v := range s.store {
+ store[k] = v
+ }
+ s.lock.RUnlock()
+
+ return &simpleCache{
+ store: store,
+ }
+}
+
+// Get retrieves a cached URI
+func (s *simpleCache) Get(uri string) (interface{}, bool) {
+ s.lock.RLock()
+ v, ok := s.store[uri]
+
+ s.lock.RUnlock()
+ return v, ok
+}
+
+// Set caches a URI
+func (s *simpleCache) Set(uri string, data interface{}) {
+ s.lock.Lock()
+ s.store[uri] = data
+ s.lock.Unlock()
+}
+
+var (
+ // resCache is a package level cache for $ref resolution and expansion.
+ // It is initialized lazily by methods that have the need for it: no
+ // memory is allocated unless some expander methods are called.
+ //
+ // It is initialized with JSON schema and swagger schema,
+ // which do not mutate during normal operations.
+ //
+ // All subsequent utilizations of this cache are produced from a shallow
+ // clone of this initial version.
+ resCache *simpleCache
+ onceCache sync.Once
+
+ _ ResolutionCache = &simpleCache{}
+)
+
+// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call.
+func initResolutionCache() {
+ resCache = defaultResolutionCache()
+}
+
+func defaultResolutionCache() *simpleCache {
+ return &simpleCache{store: map[string]interface{}{
+ "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(),
+ "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
+ }}
+}
+
+func cacheOrDefault(cache ResolutionCache) ResolutionCache {
+ onceCache.Do(initResolutionCache)
+
+ if cache != nil {
+ return cache
+ }
+
+ // get a shallow clone of the base cache with swagger and json schema
+ return resCache.ShallowClone()
+}
diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go
new file mode 100644
index 00000000..2f7bb219
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/contact_info.go
@@ -0,0 +1,57 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// ContactInfo contact information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#contactObject
+type ContactInfo struct {
+ ContactInfoProps
+ VendorExtensible
+}
+
+// ContactInfoProps hold the properties of a ContactInfo object
+type ContactInfoProps struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+ Email string `json:"email,omitempty"`
+}
+
+// UnmarshalJSON hydrates ContactInfo from json
+func (c *ContactInfo) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &c.VendorExtensible)
+}
+
+// MarshalJSON produces ContactInfo as json
+func (c ContactInfo) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(c.ContactInfoProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(c.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go
new file mode 100644
index 00000000..fc889f6d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/debug.go
@@ -0,0 +1,49 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "runtime"
+)
+
+// Debug is true when the SWAGGER_DEBUG env var is not empty.
+//
+// It enables a more verbose logging of this package.
+var Debug = os.Getenv("SWAGGER_DEBUG") != ""
+
+var (
+ // specLogger is a debug logger for this package
+ specLogger *log.Logger
+)
+
+func init() {
+ debugOptions()
+}
+
+func debugOptions() {
+ specLogger = log.New(os.Stdout, "spec:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...interface{}) {
+ // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+ if Debug {
+ _, file1, pos1, _ := runtime.Caller(1)
+ specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go
new file mode 100644
index 00000000..1f428475
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/embed.go
@@ -0,0 +1,17 @@
+package spec
+
+import (
+ "embed"
+ "path"
+)
+
+//go:embed schemas/*.json schemas/*/*.json
+var assets embed.FS
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json"))
+}
+
+func v2SchemaJSONBytes() ([]byte, error) {
+ return assets.ReadFile(path.Join("schemas", "v2", "schema.json"))
+}
diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go
new file mode 100644
index 00000000..6992c7ba
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/errors.go
@@ -0,0 +1,19 @@
+package spec
+
+import "errors"
+
+// Error codes
+var (
+ // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type
+ ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference")
+
+ // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer
+ ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer")
+
+ // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type.
+ // At the moment, $ref are supported only inside: schemas, parameters, responses, path items
+ ErrDerefUnsupportedType = errors.New("deref: unsupported type")
+
+ // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type
+ ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response")
+)
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
new file mode 100644
index 00000000..b81a5699
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -0,0 +1,607 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// ExpandOptions provides options for the spec expander.
+//
+// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file.
+//
+// If left empty, the root document is assumed to be located in the current working directory:
+// all relative $ref's will be resolved from there.
+//
+// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable.
+type ExpandOptions struct {
+ RelativeBase string // the path to the root document to expand. This is a file, not a directory
+ SkipSchemas bool // do not expand schemas, just paths, parameters and responses
+ ContinueOnError bool // continue expanding even after an error is found
+ PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document
+ AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs
+}
+
+func optionsOrDefault(opts *ExpandOptions) *ExpandOptions {
+ if opts != nil {
+ clone := *opts // shallow clone to avoid internal changes to be propagated to the caller
+ if clone.RelativeBase != "" {
+ clone.RelativeBase = normalizeBase(clone.RelativeBase)
+ }
+ // if the relative base is empty, let the schema loader choose a pseudo root document
+ return &clone
+ }
+ return &ExpandOptions{}
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+ options = optionsOrDefault(options)
+ resolver := defaultSchemaLoader(spec, options, nil, nil)
+
+ specBasePath := options.RelativeBase
+
+ if !options.SkipSchemas {
+ for key, definition := range spec.Definitions {
+ parentRefs := make([]string, 0, 10)
+ parentRefs = append(parentRefs, "#/definitions/"+key)
+
+ def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
+ if resolver.shouldStopOnError(err) {
+ return err
+ }
+ if def != nil {
+ spec.Definitions[key] = *def
+ }
+ }
+ }
+
+ for key := range spec.Parameters {
+ parameter := spec.Parameters[key]
+ if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ spec.Parameters[key] = parameter
+ }
+
+ for key := range spec.Responses {
+ response := spec.Responses[key]
+ if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ spec.Responses[key] = response
+ }
+
+ if spec.Paths != nil {
+ for key := range spec.Paths.Paths {
+ pth := spec.Paths.Paths[key]
+ if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ spec.Paths.Paths[key] = pth
+ }
+ }
+
+ return nil
+}
+
+const rootBase = ".root"
+
+// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry
+// for further $ref resolution
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+ // cache the root document to resolve $ref's
+ normalizedBase := normalizeBase(rootBase)
+
+ if root == nil {
+ // ensure that we never leave a nil root: always cache the root base pseudo-document
+ cachedRoot, found := cache.Get(normalizedBase)
+ if found && cachedRoot != nil {
+ // the cache is already preloaded with a root
+ return normalizedBase
+ }
+
+ root = map[string]interface{}{}
+ }
+
+ cache.Set(normalizedBase, root)
+
+ return normalizedBase
+}
+
+// ExpandSchema expands the refs in the schema object with reference to the root object.
+//
+// go-openapi/validate uses this function.
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandSchemaWithBasePath to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
+ cache = cacheOrDefault(cache)
+ if root == nil {
+ root = schema
+ }
+
+ opts := &ExpandOptions{
+ // when a root is specified, cache the root as an in-memory document for $ref retrieval
+ RelativeBase: baseForRoot(root, cache),
+ SkipSchemas: false,
+ ContinueOnError: false,
+ }
+
+ return ExpandSchemaWithBasePath(schema, cache, opts)
+}
+
+// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options.
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error {
+ if schema == nil {
+ return nil
+ }
+
+ cache = cacheOrDefault(cache)
+
+ opts = optionsOrDefault(opts)
+
+ resolver := defaultSchemaLoader(nil, opts, cache, nil)
+
+ parentRefs := make([]string, 0, 10)
+ s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase)
+ if err != nil {
+ return err
+ }
+ if s != nil {
+ // guard for when continuing on error
+ *schema = *s
+ }
+
+ return nil
+}
+
+func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
+ if target.Items == nil {
+ return &target, nil
+ }
+
+ // array
+ if target.Items.Schema != nil {
+ t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath)
+ if err != nil {
+ return nil, err
+ }
+ *target.Items.Schema = *t
+ }
+
+ // tuple
+ for i := range target.Items.Schemas {
+ t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath)
+ if err != nil {
+ return nil, err
+ }
+ target.Items.Schemas[i] = *t
+ }
+
+ return &target, nil
+}
+
+func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
+ if target.Ref.String() == "" && target.Ref.IsRoot() {
+ newRef := normalizeRef(&target.Ref, basePath)
+ target.Ref = *newRef
+ return &target, nil
+ }
+
+ // change the base path of resolution when an ID is encountered
+ // otherwise the basePath should inherit the parent's
+ if target.ID != "" {
+ basePath, _ = resolver.setSchemaID(target, target.ID, basePath)
+ }
+
+ if target.Ref.String() != "" {
+ if !resolver.options.SkipSchemas {
+ return expandSchemaRef(target, parentRefs, resolver, basePath)
+ }
+
+ // when "expand" with SkipSchemas, we just rebase the existing $ref without replacing
+ // the full schema.
+ rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath))
+ if err != nil {
+ return nil, err
+ }
+ target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
+
+ return &target, nil
+ }
+
+ for k := range target.Definitions {
+ tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if tt != nil {
+ target.Definitions[k] = *tt
+ }
+ }
+
+ t, err := expandItems(target, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target = *t
+ }
+
+ for i := range target.AllOf {
+ t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target.AllOf[i] = *t
+ }
+ }
+
+ for i := range target.AnyOf {
+ t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target.AnyOf[i] = *t
+ }
+ }
+
+ for i := range target.OneOf {
+ t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target.OneOf[i] = *t
+ }
+ }
+
+ if target.Not != nil {
+ t, err := expandSchema(*target.Not, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ *target.Not = *t
+ }
+ }
+
+ for k := range target.Properties {
+ t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target.Properties[k] = *t
+ }
+ }
+
+ if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
+ t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ *target.AdditionalProperties.Schema = *t
+ }
+ }
+
+ for k := range target.PatternProperties {
+ t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ target.PatternProperties[k] = *t
+ }
+ }
+
+ for k := range target.Dependencies {
+ if target.Dependencies[k].Schema != nil {
+ t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ *target.Dependencies[k].Schema = *t
+ }
+ }
+ }
+
+ if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
+ t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return &target, err
+ }
+ if t != nil {
+ *target.AdditionalItems.Schema = *t
+ }
+ }
+ return &target, nil
+}
+
+func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
+ // if a Ref is found, all sibling fields are skipped
+ // Ref also changes the resolution scope of children expandSchema
+
+ // here the resolution scope is changed because a $ref was encountered
+ normalizedRef := normalizeRef(&target.Ref, basePath)
+ normalizedBasePath := normalizedRef.RemoteURI()
+
+ if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
+ // this means there is a cycle in the recursion tree: return the Ref
+ // - circular refs cannot be expanded. We leave them as ref.
+ // - denormalization means that a new local file ref is set relative to the original basePath
+ debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+ basePath, normalizedBasePath, normalizedRef.String())
+ if !resolver.options.AbsoluteCircularRef {
+ target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID)
+ } else {
+ target.Ref = *normalizedRef
+ }
+ return &target, nil
+ }
+
+ var t *Schema
+ err := resolver.Resolve(&target.Ref, &t, basePath)
+ if resolver.shouldStopOnError(err) {
+ return nil, err
+ }
+
+ if t == nil {
+ // guard for when continuing on error
+ return &target, nil
+ }
+
+ parentRefs = append(parentRefs, normalizedRef.String())
+ transitiveResolver := resolver.transitiveResolver(basePath, target.Ref)
+
+ basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+ return expandSchema(*t, parentRefs, transitiveResolver, basePath)
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
+ if pathItem == nil {
+ return nil
+ }
+
+ parentRefs := make([]string, 0, 10)
+ if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ if pathItem.Ref.String() != "" {
+ transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref)
+ basePath = transitiveResolver.updateBasePath(resolver, basePath)
+ resolver = transitiveResolver
+ }
+
+ pathItem.Ref = Ref{}
+ for i := range pathItem.Parameters {
+ if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ }
+
+ ops := []*Operation{
+ pathItem.Get,
+ pathItem.Head,
+ pathItem.Options,
+ pathItem.Put,
+ pathItem.Post,
+ pathItem.Patch,
+ pathItem.Delete,
+ }
+ for _, op := range ops {
+ if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
+ if op == nil {
+ return nil
+ }
+
+ for i := range op.Parameters {
+ param := op.Parameters[i]
+ if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ op.Parameters[i] = param
+ }
+
+ if op.Responses == nil {
+ return nil
+ }
+
+ responses := op.Responses
+ if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ for code := range responses.StatusCodeResponses {
+ response := responses.StatusCodeResponses[code]
+ if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+ responses.StatusCodeResponses[code] = response
+ }
+
+ return nil
+}
+
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandResponse to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error {
+ cache = cacheOrDefault(cache)
+ opts := &ExpandOptions{
+ RelativeBase: baseForRoot(root, cache),
+ }
+ resolver := defaultSchemaLoader(root, opts, cache, nil)
+
+ return expandParameterOrResponse(response, resolver, opts.RelativeBase)
+}
+
+// ExpandResponse expands a response based on a basepath
+//
+// All refs inside response will be resolved relative to basePath
+func ExpandResponse(response *Response, basePath string) error {
+ opts := optionsOrDefault(&ExpandOptions{
+ RelativeBase: basePath,
+ })
+ resolver := defaultSchemaLoader(nil, opts, nil, nil)
+
+ return expandParameterOrResponse(response, resolver, opts.RelativeBase)
+}
+
+// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document.
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandParameter to resolve external references).
+func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error {
+ cache = cacheOrDefault(cache)
+
+ opts := &ExpandOptions{
+ RelativeBase: baseForRoot(root, cache),
+ }
+ resolver := defaultSchemaLoader(root, opts, cache, nil)
+
+ return expandParameterOrResponse(parameter, resolver, opts.RelativeBase)
+}
+
+// ExpandParameter expands a parameter based on a basepath.
+// This is the exported version of expandParameter
+// all refs inside parameter will be resolved relative to basePath
+func ExpandParameter(parameter *Parameter, basePath string) error {
+ opts := optionsOrDefault(&ExpandOptions{
+ RelativeBase: basePath,
+ })
+ resolver := defaultSchemaLoader(nil, opts, nil, nil)
+
+ return expandParameterOrResponse(parameter, resolver, opts.RelativeBase)
+}
+
+func getRefAndSchema(input interface{}) (*Ref, *Schema, error) {
+ var (
+ ref *Ref
+ sch *Schema
+ )
+
+ switch refable := input.(type) {
+ case *Parameter:
+ if refable == nil {
+ return nil, nil, nil
+ }
+ ref = &refable.Ref
+ sch = refable.Schema
+ case *Response:
+ if refable == nil {
+ return nil, nil, nil
+ }
+ ref = &refable.Ref
+ sch = refable.Schema
+ default:
+ return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType)
+ }
+
+ return ref, sch, nil
+}
+
+func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error {
+ ref, sch, err := getRefAndSchema(input)
+ if err != nil {
+ return err
+ }
+
+ if ref == nil && sch == nil { // nothing to do
+ return nil
+ }
+
+ parentRefs := make([]string, 0, 10)
+ if ref != nil {
+ // dereference this $ref
+ if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ ref, sch, _ = getRefAndSchema(input)
+ }
+
+ if ref.String() != "" {
+ transitiveResolver := resolver.transitiveResolver(basePath, *ref)
+ basePath = resolver.updateBasePath(transitiveResolver, basePath)
+ resolver = transitiveResolver
+ }
+
+ if sch == nil {
+ // nothing to be expanded
+ if ref != nil {
+ *ref = Ref{}
+ }
+
+ return nil
+ }
+
+ if sch.Ref.String() != "" {
+ rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath))
+ if ern != nil {
+ return ern
+ }
+
+ if resolver.isCircular(&rebasedRef, basePath, parentRefs...) {
+ // this is a circular $ref: stop expansion
+ if !resolver.options.AbsoluteCircularRef {
+ sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
+ } else {
+ sch.Ref = rebasedRef
+ }
+ }
+ }
+
+ // $ref expansion or rebasing is performed by expandSchema below
+ if ref != nil {
+ *ref = Ref{}
+ }
+
+ // expand schema
+ // yes, we do it even if options.SkipSchemas is true: we have to go down that rabbit hole and rebase nested $ref
+ s, err := expandSchema(*sch, parentRefs, resolver, basePath)
+ if resolver.shouldStopOnError(err) {
+ return err
+ }
+
+ if s != nil { // guard for when continuing on error
+ *sch = *s
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go
new file mode 100644
index 00000000..88add91b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/external_docs.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ExternalDocumentation allows referencing an external resource for
+// extended documentation.
+//
+// For more information: http://goo.gl/8us55a#externalDocumentationObject
+type ExternalDocumentation struct {
+ Description string `json:"description,omitempty"`
+ URL string `json:"url,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go
new file mode 100644
index 00000000..9dfd17b1
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/header.go
@@ -0,0 +1,203 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+const (
+ jsonArray = "array"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+ Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+ HeaderProps
+}
+
+// ResponseHeader creates a new header instance for use in a response
+func ResponseHeader() *Header {
+ return new(Header)
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (h *Header) WithDescription(description string) *Header {
+ h.Description = description
+ return h
+}
+
+// Typed a fluent builder method for the type of parameter
+func (h *Header) Typed(tpe, format string) *Header {
+ h.Type = tpe
+ h.Format = format
+ return h
+}
+
+// CollectionOf a fluent builder method for an array item
+func (h *Header) CollectionOf(items *Items, format string) *Header {
+ h.Type = jsonArray
+ h.Items = items
+ h.CollectionFormat = format
+ return h
+}
+
+// WithDefault sets the default value on this item
+func (h *Header) WithDefault(defaultValue interface{}) *Header {
+ h.Default = defaultValue
+ return h
+}
+
+// WithMaxLength sets a max length value
+func (h *Header) WithMaxLength(max int64) *Header {
+ h.MaxLength = &max
+ return h
+}
+
+// WithMinLength sets a min length value
+func (h *Header) WithMinLength(min int64) *Header {
+ h.MinLength = &min
+ return h
+}
+
+// WithPattern sets a pattern value
+func (h *Header) WithPattern(pattern string) *Header {
+ h.Pattern = pattern
+ return h
+}
+
+// WithMultipleOf sets a multiple of value
+func (h *Header) WithMultipleOf(number float64) *Header {
+ h.MultipleOf = &number
+ return h
+}
+
+// WithMaximum sets a maximum number value
+func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
+ h.Maximum = &max
+ h.ExclusiveMaximum = exclusive
+ return h
+}
+
+// WithMinimum sets a minimum number value
+func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
+ h.Minimum = &min
+ h.ExclusiveMinimum = exclusive
+ return h
+}
+
+// WithEnum sets the enum values (replace)
+func (h *Header) WithEnum(values ...interface{}) *Header {
+ h.Enum = append([]interface{}{}, values...)
+ return h
+}
+
+// WithMaxItems sets the max items
+func (h *Header) WithMaxItems(size int64) *Header {
+ h.MaxItems = &size
+ return h
+}
+
+// WithMinItems sets the min items
+func (h *Header) WithMinItems(size int64) *Header {
+ h.MinItems = &size
+ return h
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (h *Header) UniqueValues() *Header {
+ h.UniqueItems = true
+ return h
+}
+
+// AllowDuplicates this array can have duplicates
+func (h *Header) AllowDuplicates() *Header {
+ h.UniqueItems = false
+ return h
+}
+
+// WithValidations is a fluent method to set header validations
+func (h *Header) WithValidations(val CommonValidations) *Header {
+ h.SetValidations(SchemaValidations{CommonValidations: val})
+ return h
+}
+
+// MarshalJSON marshal this to JSON
+func (h Header) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(h.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(h.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(h.HeaderProps)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// UnmarshalJSON unmarshals this header from JSON
+func (h *Header) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &h.CommonValidations); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &h.HeaderProps)
+}
+
+// JSONLookup look up a value by the json property name
+func (h Header) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := h.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(h.CommonValidations, token)
+ if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+ return nil, err
+ }
+ if r != nil {
+ return r, nil
+ }
+ r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token)
+ if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+ return nil, err
+ }
+ if r != nil {
+ return r, nil
+ }
+ r, _, err = jsonpointer.GetForToken(h.HeaderProps, token)
+ return r, err
+}
diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go
new file mode 100644
index 00000000..582f0fd4
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/info.go
@@ -0,0 +1,184 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// Extensions vendor specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+ realKey := strings.ToLower(key)
+ e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(string)
+ return str, ok
+ }
+ return "", false
+}
+
+// GetInt gets an int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+ realKey := strings.ToLower(key)
+
+ if v, ok := e.GetString(realKey); ok {
+ if r, err := strconv.Atoi(v); err == nil {
+ return r, true
+ }
+ }
+
+ if v, ok := e[realKey]; ok {
+ if r, rOk := v.(float64); rOk {
+ return int(r), true
+ }
+ }
+ return -1, false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(bool)
+ return str, ok
+ }
+ return false, false
+}
+
+// GetStringSlice gets a slice of strings from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ arr, isSlice := v.([]interface{})
+ if !isSlice {
+ return nil, false
+ }
+ var strs []string
+ for _, iface := range arr {
+ str, isString := iface.(string)
+ if !isString {
+ return nil, false
+ }
+ strs = append(strs, str)
+ }
+ return strs, ok
+ }
+ return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+ Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+ if value == nil {
+ return
+ }
+ if v.Extensions == nil {
+ v.Extensions = make(map[string]interface{})
+ }
+ v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+ toser := make(map[string]interface{})
+ for k, v := range v.Extensions {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ toser[k] = v
+ }
+ }
+ return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if v.Extensions == nil {
+ v.Extensions = map[string]interface{}{}
+ }
+ v.Extensions[k] = vv
+ }
+ }
+ return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+ Description string `json:"description,omitempty"`
+ Title string `json:"title,omitempty"`
+ TermsOfService string `json:"termsOfService,omitempty"`
+ Contact *ContactInfo `json:"contact,omitempty"`
+ License *License `json:"license,omitempty"`
+ Version string `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
+//
+// For more information: http://goo.gl/8us55a#infoObject
+type Info struct {
+ VendorExtensible
+ InfoProps
+}
+
+// JSONLookup look up a value by the json property name
+func (i Info) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := i.Extensions[token]; ok {
+ return &ex, nil
+ }
+ r, _, err := jsonpointer.GetForToken(i.InfoProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (i Info) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.InfoProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (i *Info) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &i.InfoProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &i.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go
new file mode 100644
index 00000000..e2afb213
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/items.go
@@ -0,0 +1,234 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+const (
+ jsonRef = "$ref"
+)
+
+// SimpleSchema describe swagger simple schemas for parameters and headers
+type SimpleSchema struct {
+ Type string `json:"type,omitempty"`
+ Nullable bool `json:"nullable,omitempty"`
+ Format string `json:"format,omitempty"`
+ Items *Items `json:"items,omitempty"`
+ CollectionFormat string `json:"collectionFormat,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Example interface{} `json:"example,omitempty"`
+}
+
+// TypeName returns the type (or format) of a simple schema
+func (s *SimpleSchema) TypeName() string {
+ if s.Format != "" {
+ return s.Format
+ }
+ return s.Type
+}
+
+// ItemsTypeName yields the type of items in a simple schema array
+func (s *SimpleSchema) ItemsTypeName() string {
+ if s.Items == nil {
+ return ""
+ }
+ return s.Items.TypeName()
+}
+
+// Items a limited subset of JSON-Schema's items object.
+// It is used by parameter definitions that are not located in "body".
+//
+// For more information: http://goo.gl/8us55a#items-object
+type Items struct {
+ Refable
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+}
+
+// NewItems creates a new instance of items
+func NewItems() *Items {
+ return &Items{}
+}
+
+// Typed a fluent builder method for the type of item
+func (i *Items) Typed(tpe, format string) *Items {
+ i.Type = tpe
+ i.Format = format
+ return i
+}
+
+// AsNullable flags this schema as nullable.
+func (i *Items) AsNullable() *Items {
+ i.Nullable = true
+ return i
+}
+
+// CollectionOf a fluent builder method for an array item
+func (i *Items) CollectionOf(items *Items, format string) *Items {
+ i.Type = jsonArray
+ i.Items = items
+ i.CollectionFormat = format
+ return i
+}
+
+// WithDefault sets the default value on this item
+func (i *Items) WithDefault(defaultValue interface{}) *Items {
+ i.Default = defaultValue
+ return i
+}
+
+// WithMaxLength sets a max length value
+func (i *Items) WithMaxLength(max int64) *Items {
+ i.MaxLength = &max
+ return i
+}
+
+// WithMinLength sets a min length value
+func (i *Items) WithMinLength(min int64) *Items {
+ i.MinLength = &min
+ return i
+}
+
+// WithPattern sets a pattern value
+func (i *Items) WithPattern(pattern string) *Items {
+ i.Pattern = pattern
+ return i
+}
+
+// WithMultipleOf sets a multiple of value
+func (i *Items) WithMultipleOf(number float64) *Items {
+ i.MultipleOf = &number
+ return i
+}
+
+// WithMaximum sets a maximum number value
+func (i *Items) WithMaximum(max float64, exclusive bool) *Items {
+ i.Maximum = &max
+ i.ExclusiveMaximum = exclusive
+ return i
+}
+
+// WithMinimum sets a minimum number value
+func (i *Items) WithMinimum(min float64, exclusive bool) *Items {
+ i.Minimum = &min
+ i.ExclusiveMinimum = exclusive
+ return i
+}
+
+// WithEnum sets the enum values (replace)
+func (i *Items) WithEnum(values ...interface{}) *Items {
+ i.Enum = append([]interface{}{}, values...)
+ return i
+}
+
+// WithMaxItems sets the max items
+func (i *Items) WithMaxItems(size int64) *Items {
+ i.MaxItems = &size
+ return i
+}
+
+// WithMinItems sets the min items
+func (i *Items) WithMinItems(size int64) *Items {
+ i.MinItems = &size
+ return i
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (i *Items) UniqueValues() *Items {
+ i.UniqueItems = true
+ return i
+}
+
+// AllowDuplicates this array can have duplicates
+func (i *Items) AllowDuplicates() *Items {
+ i.UniqueItems = false
+ return i
+}
+
+// WithValidations is a fluent method to set Items validations
+func (i *Items) WithValidations(val CommonValidations) *Items {
+ i.SetValidations(SchemaValidations{CommonValidations: val})
+ return i
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (i *Items) UnmarshalJSON(data []byte) error {
+ var validations CommonValidations
+ if err := json.Unmarshal(data, &validations); err != nil {
+ return err
+ }
+ var ref Refable
+ if err := json.Unmarshal(data, &ref); err != nil {
+ return err
+ }
+ var simpleSchema SimpleSchema
+ if err := json.Unmarshal(data, &simpleSchema); err != nil {
+ return err
+ }
+ var vendorExtensible VendorExtensible
+ if err := json.Unmarshal(data, &vendorExtensible); err != nil {
+ return err
+ }
+ i.Refable = ref
+ i.CommonValidations = validations
+ i.SimpleSchema = simpleSchema
+ i.VendorExtensible = vendorExtensible
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (i Items) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(i.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b4, b3, b1, b2), nil
+}
+
+// JSONLookup look up a value by the json property name
+func (i Items) JSONLookup(token string) (interface{}, error) {
+ if token == jsonRef {
+ return &i.Ref, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(i.CommonValidations, token)
+ if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+ return nil, err
+ }
+ if r != nil {
+ return r, nil
+ }
+ r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token)
+ return r, err
+}
diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go
new file mode 100644
index 00000000..b42f8036
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/license.go
@@ -0,0 +1,56 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// License information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#licenseObject
+type License struct {
+ LicenseProps
+ VendorExtensible
+}
+
+// LicenseProps holds the properties of a License object
+type LicenseProps struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+}
+
+// UnmarshalJSON hydrates License from json
+func (l *License) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &l.LicenseProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &l.VendorExtensible)
+}
+
+// MarshalJSON produces License as json
+func (l License) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(l.LicenseProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(l.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go
new file mode 100644
index 00000000..e8b60099
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "net/url"
+ "path"
+ "strings"
+)
+
+const fileScheme = "file"
+
+// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized.
+//
+// NOTE(windows): there is a tolerance over the strict URI format on windows.
+//
+// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like
+// 'C:\Path\file.Yaml'.
+//
+// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
+// 'file:///c:/path/file.yaml'
+//
+// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
+// 'file:///c:\folder\File.json'.
+//
+// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
+// is attempted.
+//
+// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
+func normalizeURI(refPath, base string) string {
+ refURL, err := parseURL(refPath)
+ if err != nil {
+ specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
+ refURL, refPath = repairURI(refPath)
+ }
+
+ fixWindowsURI(refURL, refPath) // noop on non-windows OS
+
+ refURL.Path = path.Clean(refURL.Path)
+ if refURL.Path == "." {
+ refURL.Path = ""
+ }
+
+ r := MustCreateRef(refURL.String())
+ if r.IsCanonical() {
+ return refURL.String()
+ }
+
+ baseURL, _ := parseURL(base)
+ if path.IsAbs(refURL.Path) {
+ baseURL.Path = refURL.Path
+ } else if refURL.Path != "" {
+ baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+ }
+ // copying fragment from ref to base
+ baseURL.Fragment = refURL.Fragment
+
+ return baseURL.String()
+}
+
+// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
+//
+// When calling this, we assume that:
+// * $ref is a canonical URI
+// * originalRelativeBase is a canonical URI
+//
+// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
+// In this case, expansion stops and normally renders the internal canonical $ref.
+//
+// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
+//
+// There is a special case for schemas that are anchored with an "id":
+// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
+// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing.
+func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
+ debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
+
+ if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+ // short circuit: $ref to current doc
+ return *ref
+ }
+
+ if id != "" {
+ idBaseURL, err := parseURL(id)
+ if err == nil { // if the schema id is not usable as a URI, ignore it
+ if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
+ // $ref relative to the ID of the schema in the root document
+ return ref
+ }
+ }
+ }
+
+ originalRelativeBaseURL, _ := parseURL(originalRelativeBase)
+
+ r, _ := rebase(ref, originalRelativeBaseURL, false)
+
+ return r
+}
+
+func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
+ var newBase url.URL
+
+ u := ref.GetURL()
+
+ if u.Scheme != v.Scheme || u.Host != v.Host {
+ return *ref, false
+ }
+
+ docPath := v.Path
+ v.Path = path.Dir(v.Path)
+
+ if v.Path == "." {
+ v.Path = ""
+ } else if !strings.HasSuffix(v.Path, "/") {
+ v.Path += "/"
+ }
+
+ newBase.Fragment = u.Fragment
+
+ if strings.HasPrefix(u.Path, docPath) {
+ newBase.Path = strings.TrimPrefix(u.Path, docPath)
+ } else {
+ newBase.Path = strings.TrimPrefix(u.Path, v.Path)
+ }
+
+ if notEqual && newBase.Path == "" && newBase.Fragment == "" {
+ // do not want rebasing to end up in an empty $ref
+ return *ref, false
+ }
+
+ if path.IsAbs(newBase.Path) {
+ // whenever we end up with an absolute path, specify the scheme and host
+ newBase.Scheme = v.Scheme
+ newBase.Host = v.Host
+ }
+
+ return MustCreateRef(newBase.String()), true
+}
+
+// normalizeRef canonicalizes a Ref, using a canonical relativeBase as its absolute anchor
+func normalizeRef(ref *Ref, relativeBase string) *Ref {
+ r := MustCreateRef(normalizeURI(ref.String(), relativeBase))
+ return &r
+}
+
+// normalizeBase performs a normalization of the input base path.
+//
+// This always yields a canonical URI (absolute), usable for the document cache.
+//
+// It ensures that all further internal work on basePath may safely assume
+// a non-empty, cross-platform, canonical URI (i.e. absolute).
+//
+// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this
+// in a file:// URL with lower cased drive letter and path.
+//
+// See also: https://en.wikipedia.org/wiki/File_URI_scheme
+func normalizeBase(in string) string {
+ u, err := parseURL(in)
+ if err != nil {
+ specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err)
+ u, in = repairURI(in)
+ }
+
+ u.Fragment = "" // any fragment in the base is irrelevant
+
+ fixWindowsURI(u, in) // noop on non-windows OS
+
+ u.Path = path.Clean(u.Path)
+ if u.Path == "." { // empty after Clean()
+ u.Path = ""
+ }
+
+ if u.Scheme != "" {
+ if path.IsAbs(u.Path) || u.Scheme != fileScheme {
+ // this is absolute or explicitly not a local file: we're good
+ return u.String()
+ }
+ }
+
+ // no scheme or file scheme with relative path: assume file and make it absolute
+ // enforce scheme file://... with absolute path.
+ //
+ // If the input path is relative, we anchor the path to the current working directory.
+ // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json
+
+ u.Scheme = fileScheme
+ u.Path = absPath(u.Path) // platform-dependent
+ u.RawQuery = "" // any query component is irrelevant for a base
+ return u.String()
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
new file mode 100644
index 00000000..f19f1a8f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
@@ -0,0 +1,44 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "net/url"
+ "path/filepath"
+)
+
+// absPath makes a file path absolute and compatible with a URI path component.
+//
+// The parameter must be a path, not an URI.
+func absPath(in string) string {
+ anchored, err := filepath.Abs(in)
+ if err != nil {
+ specLogger.Printf("warning: could not resolve current working directory: %v", err)
+ return in
+ }
+ return anchored
+}
+
+func repairURI(in string) (*url.URL, string) {
+ u, _ := parseURL("")
+ debugLog("repaired URI: original: %q, repaired: %q", in, "")
+ return u, ""
+}
+
+func fixWindowsURI(_ *url.URL, _ string) {
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go
new file mode 100644
index 00000000..a66c532d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go
@@ -0,0 +1,154 @@
+// -build windows
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+// absPath makes a file path absolute and compatible with a URI path component
+//
+// The parameter must be a path, not an URI.
+func absPath(in string) string {
+ // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths.
+ // See https://github.com/golang/go/issues/24441
+ if in == "" {
+ in = "."
+ }
+
+ anchored, err := filepath.Abs(in)
+ if err != nil {
+ specLogger.Printf("warning: could not resolve current working directory: %v", err)
+ return in
+ }
+
+ pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`)
+ if !strings.HasPrefix(pth, "/") {
+ pth = "/" + pth
+ }
+
+ return path.Clean(pth)
+}
+
+// repairURI tolerates invalid file URIs with common typos
+// such as 'file://E:\folder\file', that break the regular URL parser.
+//
+// Adopting the same defaults as for unixes (e.g. return an empty path) would
+// result into a counter-intuitive result for that case (e.g. E:\folder\file is
+// eventually resolved as the current directory). The repair will detect the missing "/".
+//
+// Note that this only works for the file scheme.
+func repairURI(in string) (*url.URL, string) {
+ const prefix = fileScheme + "://"
+ if !strings.HasPrefix(in, prefix) {
+ // giving up: resolve to empty path
+ u, _ := parseURL("")
+
+ return u, ""
+ }
+
+ // attempt the repair, stripping the scheme should be sufficient
+ u, _ := parseURL(strings.TrimPrefix(in, prefix))
+ debugLog("repaired URI: original: %q, repaired: %q", in, u.String())
+
+ return u, u.String()
+}
+
+// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml
+// and makes it a canonical URI: file:///c:/base/file.yaml
+//
+// Catch 22 notes for Windows:
+//
+// * There may be a drive letter on windows (it is lower-cased)
+// * There may be a share UNC, e.g. \\server\folder\data.xml
+// * Paths are case insensitive
+// * Paths may already contain slashes
+// * Paths must be slashed
+//
+// NOTE: there is no escaping. "/" may be valid separators just like "\".
+// We don't use ToSlash() (which escapes everything) because windows now also
+// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work.
+func fixWindowsURI(u *url.URL, in string) {
+ drive := filepath.VolumeName(in)
+
+ if len(drive) > 0 {
+ if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter
+ u.Scheme = fileScheme
+ u.Host = ""
+ u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query)
+ } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume
+ // NOTE: the special host@port syntax for UNC is not supported (yet)
+ u.Scheme = fileScheme
+
+ // this is a modified version of filepath.Dir() to apply on the VolumeName itself
+ i := len(drive) - 1
+ for i >= 0 && !os.IsPathSeparator(drive[i]) {
+ i--
+ }
+ host := drive[:i] // \\host\share => host
+
+ u.Path = strings.TrimPrefix(u.Path, host)
+ u.Host = strings.TrimPrefix(host, `\\`)
+ }
+
+ u.Opaque = ""
+ u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`)
+
+ // ensure we form an absolute path
+ if !strings.HasPrefix(u.Path, "/") {
+ u.Path = "/" + u.Path
+ }
+
+ u.Path = path.Clean(u.Path)
+
+ return
+ }
+
+ if u.Scheme == fileScheme {
+ // Handle dodgy cases for file://{...} URIs on windows.
+ // A canonical URI should always be followed by an absolute path.
+ //
+ // Examples:
+ // * file:///folder/file => valid, unchanged
+ // * file:///c:\folder\file => slashed
+ // * file:///./folder/file => valid, cleaned to remove the dot
+ // * file:///.\folder\file => remapped to cwd
+ // * file:///. => dodgy, remapped to / (consistent with the behavior on unix)
+ // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix)
+ if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) {
+ // ensure we form an absolute path
+ u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`))
+ if !strings.HasPrefix(u.Path, "/") {
+ u.Path = "/" + u.Path
+ }
+ }
+ u.Path = strings.ToLower(u.Path)
+ }
+
+ // NOTE: lower case normalization does not propagate to inner resources,
+ // generated when rebasing: when joining a relative URI with a file to an absolute base,
+ // only the base is currently lower-cased.
+ //
+ // For now, we assume this is good enough for most use cases
+ // and try not to generate too many differences
+ // between the output produced on different platforms.
+ u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`))
+}
diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go
new file mode 100644
index 00000000..a69cca88
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/operation.go
@@ -0,0 +1,400 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "sort"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+func init() {
+	// Register the generic JSON container types with gob so values held
+	// as interface{} (extensions, enums, defaults) survive the
+	// Gob(En|De)code round-trips defined later in this file.
+	gob.Register(map[string]interface{}{})
+	gob.Register([]interface{}{})
+}
+
+// OperationProps describes an operation
+//
+// NOTES:
+// - schemes, when present must be from [http, https, ws, wss]: see validate
+// - Security is handled as a special case: see MarshalJSON function
+type OperationProps struct {
+	Description  string                 `json:"description,omitempty"`
+	Consumes     []string               `json:"consumes,omitempty"`
+	Produces     []string               `json:"produces,omitempty"`
+	Schemes      []string               `json:"schemes,omitempty"`
+	Tags         []string               `json:"tags,omitempty"`
+	Summary      string                 `json:"summary,omitempty"`
+	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+	ID           string                 `json:"operationId,omitempty"`
+	Deprecated   bool                   `json:"deprecated,omitempty"`
+	Security     []map[string][]string  `json:"security,omitempty"`
+	Parameters   []Parameter            `json:"parameters,omitempty"`
+	Responses    *Responses             `json:"responses,omitempty"`
+}
+
+// MarshalJSON takes care of serializing operation properties to JSON
+//
+// We use a custom marshaler here to handle a special case related to
+// the Security field. We need to preserve zero length slice
+// while omitting the field when the value is nil/unset.
+func (op OperationProps) MarshalJSON() ([]byte, error) {
+	type Alias OperationProps
+	if op.Security == nil {
+		// nil security: rely on omitempty so the field is dropped entirely
+		return json.Marshal(&struct {
+			Security []map[string][]string `json:"security,omitempty"`
+			*Alias
+		}{
+			Security: op.Security,
+			Alias:    (*Alias)(&op),
+		})
+	}
+	// non-nil (possibly empty) security: always emit the field,
+	// so "security": [] is preserved in the output
+	return json.Marshal(&struct {
+		Security []map[string][]string `json:"security"`
+		*Alias
+	}{
+		Security: op.Security,
+		Alias:    (*Alias)(&op),
+	})
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+	VendorExtensible // x-... extension properties
+	OperationProps   // the swagger-defined operation properties
+}
+
+// SuccessResponse gets a success response model
+//
+// It returns the response registered for the lowest status code in the
+// 2xx range, flagged true. When no 2xx response exists, the default
+// response (possibly nil) is returned with code 0 and flagged false.
+func (o *Operation) SuccessResponse() (*Response, int, bool) {
+	if o.Responses == nil {
+		return nil, 0, false
+	}
+
+	// single pass: track the smallest 2xx status code seen so far
+	best := 0
+	for code := range o.Responses.StatusCodeResponses {
+		if code < 200 || code >= 300 {
+			continue
+		}
+		if best == 0 || code < best {
+			best = code
+		}
+	}
+	if best != 0 {
+		resp := o.Responses.StatusCodeResponses[best]
+		return &resp, best, true
+	}
+
+	return o.Responses.Default, 0, false
+}
+
+// JSONLookup look up a value by the json property name
+//
+// Vendor extensions (x-... keys) take precedence over operation properties.
+func (o Operation) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := o.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+//
+// The payload is unmarshaled twice: once into the operation properties
+// and once into the vendor extensions.
+func (o *Operation) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// MarshalJSON converts this items object to JSON
+//
+// The operation properties and the vendor extensions are marshaled
+// separately, then concatenated into a single JSON object.
+func (o Operation) MarshalJSON() ([]byte, error) {
+	propsJSON, err := json.Marshal(o.OperationProps)
+	if err != nil {
+		return nil, err
+	}
+	extJSON, err := json.Marshal(o.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(propsJSON, extJSON), nil
+}
+
+// NewOperation creates a new operation instance.
+// It expects an ID as parameter but not passing an ID is also valid.
+func NewOperation(id string) *Operation {
+	op := new(Operation)
+	op.ID = id
+	return op
+}
+
+// WithID sets the ID property on this operation, allows for chaining.
+func (o *Operation) WithID(id string) *Operation {
+	o.ID = id
+	return o
+}
+
+// WithDescription sets the description on this operation, allows for chaining
+func (o *Operation) WithDescription(description string) *Operation {
+	o.Description = description
+	return o
+}
+
+// WithSummary sets the summary on this operation, allows for chaining
+func (o *Operation) WithSummary(summary string) *Operation {
+	o.Summary = summary
+	return o
+}
+
+// WithExternalDocs sets/removes the external docs for/from this operation.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass non-empty string as one value then those values will be used on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (o *Operation) WithExternalDocs(description, url string) *Operation {
+	if description == "" && url == "" {
+		// both empty: remove the external docs altogether
+		o.ExternalDocs = nil
+		return o
+	}
+
+	if o.ExternalDocs == nil {
+		o.ExternalDocs = &ExternalDocumentation{}
+	}
+	o.ExternalDocs.Description = description
+	o.ExternalDocs.URL = url
+	return o
+}
+
+// Deprecate marks the operation as deprecated
+func (o *Operation) Deprecate() *Operation {
+	o.Deprecated = true
+	return o
+}
+
+// Undeprecate marks the operation as not deprecated
+func (o *Operation) Undeprecate() *Operation {
+	o.Deprecated = false
+	return o
+}
+
+// WithConsumes adds media types for incoming body values
+func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
+	o.Consumes = append(o.Consumes, mediaTypes...)
+	return o
+}
+
+// WithProduces adds media types for outgoing body values
+func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
+	o.Produces = append(o.Produces, mediaTypes...)
+	return o
+}
+
+// WithTags adds tags for this operation
+func (o *Operation) WithTags(tags ...string) *Operation {
+	o.Tags = append(o.Tags, tags...)
+	return o
+}
+
+// AddParam adds a parameter to this operation, when a parameter for that location
+// and with that name already exists it will be replaced
+//
+// A nil parameter is ignored. Parameters are matched by (Name, In).
+func (o *Operation) AddParam(param *Parameter) *Operation {
+	if param == nil {
+		return o
+	}
+
+	for i, p := range o.Parameters {
+		if p.Name == param.Name && p.In == param.In {
+			// replace the match in a freshly allocated slice, so any
+			// previously shared backing array is left untouched
+			params := make([]Parameter, 0, len(o.Parameters)+1)
+			params = append(params, o.Parameters[:i]...)
+			params = append(params, *param)
+			params = append(params, o.Parameters[i+1:]...)
+			o.Parameters = params
+
+			return o
+		}
+	}
+
+	// no match: append to the end
+	o.Parameters = append(o.Parameters, *param)
+	return o
+}
+
+// RemoveParam removes a parameter from the operation
+//
+// Parameters are matched by name and location; only the first match
+// is removed. NOTE(review): unlike AddParam, removal mutates the
+// existing backing array in place.
+func (o *Operation) RemoveParam(name, in string) *Operation {
+	for i, p := range o.Parameters {
+		if p.Name == name && p.In == in {
+			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
+			return o
+		}
+	}
+	return o
+}
+
+// SecuredWith adds a security scope to this operation.
+//
+// Each call appends one security requirement mapping name -> scopes.
+func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
+	o.Security = append(o.Security, map[string][]string{name: scopes})
+	return o
+}
+
+// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response
+func (o *Operation) WithDefaultResponse(response *Response) *Operation {
+	// code 0 is the sentinel for the default response
+	return o.RespondsWith(0, response)
+}
+
+// RespondsWith adds a status code response to the operation.
+// When the code is 0 the value of the response will be used as default response value.
+// When the value of the response is nil it will be removed from the operation
+func (o *Operation) RespondsWith(code int, response *Response) *Operation {
+	if o.Responses == nil {
+		o.Responses = new(Responses)
+	}
+	switch {
+	case code == 0:
+		// code 0 designates the default response (nil clears it)
+		o.Responses.Default = response
+	case response == nil:
+		// a nil response removes the entry for that status code
+		delete(o.Responses.StatusCodeResponses, code)
+	default:
+		if o.Responses.StatusCodeResponses == nil {
+			o.Responses.StatusCodeResponses = make(map[int]Response)
+		}
+		o.Responses.StatusCodeResponses[code] = *response
+	}
+	return o
+}
+
+// opsAlias prevents infinite recursion when gob-encoding OperationProps.
+type opsAlias OperationProps
+
+// gobAlias is the wire representation used by the gob helpers below.
+// Security is carried separately from the aliased props so that nil vs
+// empty security requirements can be told apart after decoding.
+type gobAlias struct {
+	Security []map[string]struct {
+		List []string
+		Pad  bool
+	}
+	Alias           *opsAlias
+	SecurityIsEmpty bool
+}
+
+// GobEncode provides a safe gob encoder for Operation, including empty security requirements
+func (o Operation) GobEncode() ([]byte, error) {
+	// encode the two embedded parts as named fields; OperationProps has
+	// its own GobEncode to handle the security special case
+	raw := struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}{
+		Ext:   o.VendorExtensible,
+		Props: o.OperationProps,
+	}
+	var b bytes.Buffer
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Operation, including empty security requirements
+func (o *Operation) GobDecode(b []byte) error {
+	var raw struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	o.VendorExtensible = raw.Ext
+	o.OperationProps = raw.Props
+	return nil
+}
+
+// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements
+//
+// Security is moved out of the aliased props into the gobAlias wrapper so
+// that the nil / empty / populated states are preserved across the wire.
+func (op OperationProps) GobEncode() ([]byte, error) {
+	raw := gobAlias{
+		Alias: (*opsAlias)(&op),
+	}
+
+	var b bytes.Buffer
+	if op.Security == nil {
+		// nil security requirement
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	if len(op.Security) == 0 {
+		// empty, but non-nil security requirement
+		raw.SecurityIsEmpty = true
+		raw.Alias.Security = nil
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	// populated security: copy each requirement into the padded wire shape
+	raw.Security = make([]map[string]struct {
+		List []string
+		Pad  bool
+	}, 0, len(op.Security))
+	for _, req := range op.Security {
+		v := make(map[string]struct {
+			List []string
+			Pad  bool
+		}, len(req))
+		for k, val := range req {
+			v[k] = struct {
+				List []string
+				Pad  bool
+			}{
+				List: val,
+			}
+		}
+		raw.Security = append(raw.Security, v)
+	}
+
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements
+//
+// It reverses the transformation performed by GobEncode, restoring the
+// nil / empty / populated distinction of the Security field.
+func (op *OperationProps) GobDecode(b []byte) error {
+	var raw gobAlias
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	if raw.Alias == nil {
+		// nothing was encoded; leave the receiver untouched
+		return nil
+	}
+
+	switch {
+	case raw.SecurityIsEmpty:
+		// empty, but non-nil security requirement
+		raw.Alias.Security = []map[string][]string{}
+	case len(raw.Alias.Security) == 0:
+		// nil security requirement
+		raw.Alias.Security = nil
+	default:
+		// populated: unwrap the padded wire shape back into plain maps
+		raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+		for _, req := range raw.Security {
+			v := make(map[string][]string, len(req))
+			for k, val := range req {
+				v[k] = make([]string, 0, len(val.List))
+				v[k] = append(v[k], val.List...)
+			}
+			raw.Alias.Security = append(raw.Alias.Security, v)
+		}
+	}
+
+	*op = *(*OperationProps)(raw.Alias)
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
new file mode 100644
index 00000000..bd4f1cdb
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -0,0 +1,326 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// QueryParam creates a query parameter
+func QueryParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
+}
+
+// HeaderParam creates a header parameter, this is always required by default
+func HeaderParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
+}
+
+// PathParam creates a path parameter, this is always required
+func PathParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
+}
+
+// BodyParam creates a body parameter
+func BodyParam(name string, schema *Schema) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}}
+}
+
+// FormDataParam creates a formData parameter
+func FormDataParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
+}
+
+// FileParam creates a formData parameter with the "file" type
+func FileParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"},
+		SimpleSchema: SimpleSchema{Type: "file"}}
+}
+
+// SimpleArrayParam creates a param for a simple array (string, int, date etc)
+func SimpleArrayParam(name, tpe, fmt string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name},
+		SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv",
+			Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}}
+}
+
+// ParamRef creates a parameter that's a json reference
+func ParamRef(uri string) *Parameter {
+	p := new(Parameter)
+	p.Ref = MustCreateRef(uri)
+	return p
+}
+
+// ParamProps describes the specific attributes of an operation parameter
+//
+// NOTE:
+// - Schema is defined when "in" == "body": see validate
+// - AllowEmptyValue is allowed where "in" == "query" || "formData"
+type ParamProps struct {
+	Description     string  `json:"description,omitempty"`
+	Name            string  `json:"name,omitempty"`
+	In              string  `json:"in,omitempty"`
+	Required        bool    `json:"required,omitempty"`
+	Schema          *Schema `json:"schema,omitempty"`
+	AllowEmptyValue bool    `json:"allowEmptyValue,omitempty"`
+}
+
+// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
+//
+// There are five possible parameter types.
+// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+// the path parameter is `itemId`.
+// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// - Header - Custom headers that are expected as part of the request.
+// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+// together for the same operation.
+// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+// declared together with a body parameter for the same operation. Form parameters have a different format based on
+// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+// parameters that are being transferred.
+// - `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+// `submit-name`. This type of form parameters is more commonly used for file transfers.
+//
+// For more information: http://goo.gl/8us55a#parameterObject
+type Parameter struct {
+	Refable           // $ref
+	CommonValidations // validation keywords shared with items/headers
+	SimpleSchema      // type/format for non-body parameters
+	VendorExtensible  // x-... extension properties
+	ParamProps        // parameter-specific properties
+}
+
+// JSONLookup look up a value by the json property name
+//
+// Lookup order: vendor extensions, $ref, common validations,
+// simple schema, then parameter props. "object has no field" errors
+// from intermediate lookups are tolerated so the search can continue.
+func (p Parameter) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == jsonRef {
+		return &p.Ref, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(p.ParamProps, token)
+	return r, err
+}
+
+// WithDescription a fluent builder method for the description of the parameter
+func (p *Parameter) WithDescription(description string) *Parameter {
+	p.Description = description
+	return p
+}
+
+// Named a fluent builder method to override the name of the parameter
+func (p *Parameter) Named(name string) *Parameter {
+	p.Name = name
+	return p
+}
+
+// WithLocation a fluent builder method to override the location of the parameter
+func (p *Parameter) WithLocation(in string) *Parameter {
+	p.In = in
+	return p
+}
+
+// Typed a fluent builder method for the type of the parameter value
+func (p *Parameter) Typed(tpe, format string) *Parameter {
+	p.Type = tpe
+	p.Format = format
+	return p
+}
+
+// CollectionOf a fluent builder method for an array parameter
+func (p *Parameter) CollectionOf(items *Items, format string) *Parameter {
+	p.Type = jsonArray
+	p.Items = items
+	p.CollectionFormat = format
+	return p
+}
+
+// WithDefault sets the default value on this parameter
+func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter {
+	p.AsOptional() // with default implies optional
+	p.Default = defaultValue
+	return p
+}
+
+// AllowsEmptyValues flags this parameter as being ok with empty values
+func (p *Parameter) AllowsEmptyValues() *Parameter {
+	p.AllowEmptyValue = true
+	return p
+}
+
+// NoEmptyValues flags this parameter as not liking empty values
+func (p *Parameter) NoEmptyValues() *Parameter {
+	p.AllowEmptyValue = false
+	return p
+}
+
+// AsOptional flags this parameter as optional
+func (p *Parameter) AsOptional() *Parameter {
+	p.Required = false
+	return p
+}
+
+// AsRequired flags this parameter as required
+//
+// This is a no-op when a default value is set: a required parameter
+// with a default makes no sense.
+func (p *Parameter) AsRequired() *Parameter {
+	if p.Default != nil { // with a default required makes no sense
+		return p
+	}
+	p.Required = true
+	return p
+}
+
+// WithMaxLength sets a max length value
+func (p *Parameter) WithMaxLength(max int64) *Parameter {
+	p.MaxLength = &max
+	return p
+}
+
+// WithMinLength sets a min length value
+func (p *Parameter) WithMinLength(min int64) *Parameter {
+	p.MinLength = &min
+	return p
+}
+
+// WithPattern sets a pattern value
+func (p *Parameter) WithPattern(pattern string) *Parameter {
+	p.Pattern = pattern
+	return p
+}
+
+// WithMultipleOf sets a multiple of value
+func (p *Parameter) WithMultipleOf(number float64) *Parameter {
+	p.MultipleOf = &number
+	return p
+}
+
+// WithMaximum sets a maximum number value
+func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter {
+	p.Maximum = &max
+	p.ExclusiveMaximum = exclusive
+	return p
+}
+
+// WithMinimum sets a minimum number value
+func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter {
+	p.Minimum = &min
+	p.ExclusiveMinimum = exclusive
+	return p
+}
+
+// WithEnum sets the enum values (replace)
+func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
+	p.Enum = append([]interface{}{}, values...)
+	return p
+}
+
+// WithMaxItems sets the max items
+func (p *Parameter) WithMaxItems(size int64) *Parameter {
+	p.MaxItems = &size
+	return p
+}
+
+// WithMinItems sets the min items
+func (p *Parameter) WithMinItems(size int64) *Parameter {
+	p.MinItems = &size
+	return p
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (p *Parameter) UniqueValues() *Parameter {
+	p.UniqueItems = true
+	return p
+}
+
+// AllowDuplicates this array can have duplicates
+func (p *Parameter) AllowDuplicates() *Parameter {
+	p.UniqueItems = false
+	return p
+}
+
+// WithValidations is a fluent method to set parameter validations
+func (p *Parameter) WithValidations(val CommonValidations) *Parameter {
+	p.SetValidations(SchemaValidations{CommonValidations: val})
+	return p
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+//
+// The payload is unmarshaled once per embedded section so each can
+// pick up the fields it knows about.
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.ParamProps)
+}
+
+// MarshalJSON converts this items object to JSON
+//
+// Each embedded section is marshaled independently and the resulting
+// objects are concatenated, $ref first.
+func (p Parameter) MarshalJSON() ([]byte, error) {
+	sections := []interface{}{
+		p.Refable,
+		p.CommonValidations,
+		p.SimpleSchema,
+		p.VendorExtensible,
+		p.ParamProps,
+	}
+	parts := make([][]byte, 0, len(sections))
+	for _, section := range sections {
+		blob, err := json.Marshal(section)
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, blob)
+	}
+	return swag.ConcatJSON(parts...), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
new file mode 100644
index 00000000..68fc8e90
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/path_item.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// PathItemProps the path item specific properties
+type PathItemProps struct {
+	Get        *Operation  `json:"get,omitempty"`
+	Put        *Operation  `json:"put,omitempty"`
+	Post       *Operation  `json:"post,omitempty"`
+	Delete     *Operation  `json:"delete,omitempty"`
+	Options    *Operation  `json:"options,omitempty"`
+	Head       *Operation  `json:"head,omitempty"`
+	Patch      *Operation  `json:"patch,omitempty"`
+	Parameters []Parameter `json:"parameters,omitempty"`
+}
+
+// PathItem describes the operations available on a single path.
+// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer but they will
+// not know which operations and parameters are available.
+//
+// For more information: http://goo.gl/8us55a#pathItemObject
+type PathItem struct {
+	Refable          // $ref
+	VendorExtensible // x-... extension properties
+	PathItemProps    // the per-verb operations and shared parameters
+}
+
+// JSONLookup look up a value by the json property name
+//
+// Vendor extensions and $ref take precedence over path item properties.
+func (p PathItem) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == jsonRef {
+		return &p.Ref, nil
+	}
+	r, _, err := jsonpointer.GetForToken(p.PathItemProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+//
+// The payload is unmarshaled once per embedded section.
+func (p *PathItem) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.PathItemProps)
+}
+
+// MarshalJSON converts this items object to JSON
+//
+// The $ref, vendor extensions and path item properties are marshaled
+// separately, then concatenated into a single JSON object.
+func (p PathItem) MarshalJSON() ([]byte, error) {
+	refJSON, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	extJSON, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	propsJSON, err := json.Marshal(p.PathItemProps)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(refJSON, extJSON, propsJSON), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go
new file mode 100644
index 00000000..9dc82a29
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/paths.go
@@ -0,0 +1,97 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Paths holds the relative paths to the individual endpoints.
+// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
+// to construct the full URL.
+// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+//
+// For more information: http://goo.gl/8us55a#pathsObject
+type Paths struct {
+	VendorExtensible
+	Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
+}
+
+// JSONLookup look up a value by the json property name
+//
+// Path entries take precedence over vendor extensions; an unknown token
+// yields an "object has no field" error.
+func (p Paths) JSONLookup(token string) (interface{}, error) {
+	if pi, ok := p.Paths[token]; ok {
+		return &pi, nil
+	}
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+//
+// Keys starting with "x-" (case-insensitive) become vendor extensions,
+// keys starting with "/" become path items; any other key is silently
+// ignored.
+func (p *Paths) UnmarshalJSON(data []byte) error {
+	var res map[string]json.RawMessage
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+	for k, v := range res {
+		if strings.HasPrefix(strings.ToLower(k), "x-") {
+			if p.Extensions == nil {
+				p.Extensions = make(map[string]interface{})
+			}
+			var d interface{}
+			if err := json.Unmarshal(v, &d); err != nil {
+				return err
+			}
+			p.Extensions[k] = d
+		}
+		if strings.HasPrefix(k, "/") {
+			if p.Paths == nil {
+				p.Paths = make(map[string]PathItem)
+			}
+			var pi PathItem
+			if err := json.Unmarshal(v, &pi); err != nil {
+				return err
+			}
+			p.Paths[k] = pi
+		}
+	}
+	return nil
+}
+
+// MarshalJSON converts this items object to JSON
+//
+// Vendor extensions and path entries are marshaled separately, then
+// concatenated into a single flat JSON object.
+func (p Paths) MarshalJSON() ([]byte, error) {
+	extJSON, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+
+	// only keys starting with "/" are legitimate path entries
+	filtered := make(map[string]PathItem, len(p.Paths))
+	for key, item := range p.Paths {
+		if strings.HasPrefix(key, "/") {
+			filtered[key] = item
+		}
+	}
+	pathsJSON, err := json.Marshal(filtered)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(extJSON, pathsJSON), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go
new file mode 100644
index 00000000..91d2435f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/properties.go
@@ -0,0 +1,91 @@
+package spec
+
+import (
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "sort"
+)
+
+// OrderSchemaItem holds a named schema (e.g. from a property of an object)
+type OrderSchemaItem struct {
+	Name string // the property name, used as the JSON key
+	Schema
+}
+
+// OrderSchemaItems is a sortable slice of named schemas.
+// The ordering is defined by the x-order schema extension.
+type OrderSchemaItems []OrderSchemaItem
+
+// MarshalJSON produces a json object with keys defined by the name schemas
+// of the OrderSchemaItems slice, keeping the original order of the slice.
+//
+// Property names are JSON-encoded rather than written raw, so names
+// containing quotes, backslashes or control characters still produce
+// valid JSON.
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBuffer(nil)
+	buf.WriteString("{")
+	for i := range items {
+		if i > 0 {
+			buf.WriteString(",")
+		}
+		// encode the key with json.Marshal: writing it raw would yield
+		// invalid JSON for names containing special characters
+		key, err := json.Marshal(items[i].Name)
+		if err != nil {
+			return nil, err
+		}
+		buf.Write(key)
+		buf.WriteString(":")
+		bs, err := json.Marshal(&items[i].Schema)
+		if err != nil {
+			return nil, err
+		}
+		buf.Write(bs)
+	}
+	buf.WriteString("}")
+	return buf.Bytes(), nil
+}
+
+func (items OrderSchemaItems) Len() int      { return len(items) }
+func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] }
+
+// Less orders items by their x-order extension when present, falling back
+// to name order. Items with an x-order sort before items without one.
+// NOTE(review): the nested recovers guard the comparisons of the x-order
+// values — if the direct comparison panics, a string-based comparison is
+// attempted, and if that panics too, name order is used.
+func (items OrderSchemaItems) Less(i, j int) (ret bool) {
+	ii, oki := items[i].Extensions.GetInt("x-order")
+	ij, okj := items[j].Extensions.GetInt("x-order")
+	if oki {
+		if okj {
+			defer func() {
+				if err := recover(); err != nil {
+					defer func() {
+						if err = recover(); err != nil {
+							ret = items[i].Name < items[j].Name
+						}
+					}()
+					ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String()
+				}
+			}()
+			return ii < ij
+		}
+		return true
+	} else if okj {
+		return false
+	}
+	return items[i].Name < items[j].Name
+}
+
+// SchemaProperties is a map representing the properties of a Schema object.
+// It knows how to transform its keys into an ordered slice.
+type SchemaProperties map[string]Schema
+
+// ToOrderedSchemaItems transforms the map of properties into a sortable slice
+//
+// The result is sorted per OrderSchemaItems.Less (x-order extension,
+// then property name).
+func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems {
+	items := make(OrderSchemaItems, 0, len(properties))
+	for k, v := range properties {
+		items = append(items, OrderSchemaItem{
+			Name:   k,
+			Schema: v,
+		})
+	}
+	sort.Sort(items)
+	return items
+}
+
+// MarshalJSON produces properties as json, keeping their order.
+//
+// A nil map marshals to JSON null, matching encoding/json's default.
+func (properties SchemaProperties) MarshalJSON() ([]byte, error) {
+	if properties == nil {
+		return []byte("null"), nil
+	}
+	return json.Marshal(properties.ToOrderedSchemaItems())
+}
diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go
new file mode 100644
index 00000000..b0ef9bd9
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/ref.go
@@ -0,0 +1,193 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+ Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+ return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+ return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+ jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref,
+// i.e. the reference URL with any fragment ("#...") stripped.
+func (r *Ref) RemoteURI() string {
+ if r.String() == "" {
+ return ""
+ }
+
+ // copy the URL so the receiver's fragment is left intact
+ u := *r.GetURL()
+ u.Fragment = ""
+ return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found.
+//
+// Empty refs and refs with no remote part are trivially considered valid.
+// NOTE: for full URLs this performs a live, untimed HTTP GET (see the
+// nolint:noctx annotation) and treats any 2xx status as "found"; for
+// file-like refs it stats the file, optionally resolved against basepaths.
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+ if r.String() == "" {
+ return true
+ }
+
+ v := r.RemoteURI()
+ if v == "" {
+ return true
+ }
+
+ if r.HasFullURL {
+ //nolint:noctx,gosec
+ rr, err := http.Get(v)
+ if err != nil {
+ return false
+ }
+ defer rr.Body.Close()
+
+ // any 2xx response counts as reachable
+ return rr.StatusCode/100 == 2
+ }
+
+ if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+ return false
+ }
+
+ // check for local file
+ pth := v
+ if r.HasURLPathOnly {
+ // resolve a bare path against the directory of the joined basepaths
+ base := "."
+ if len(basepaths) > 0 {
+ base = filepath.Dir(filepath.Join(basepaths...))
+ }
+ p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+ if e != nil {
+ return false
+ }
+ pth = p
+ }
+
+ fi, err := os.Stat(filepath.ToSlash(pth))
+ if err != nil {
+ return false
+ }
+
+ // directories are not valid ref targets
+ return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ ref, err := r.Ref.Inherits(child.Ref)
+ if err != nil {
+ return nil, err
+ }
+ return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object
+// returns an error when the reference uri is an invalid uri
+func NewRef(refURI string) (Ref, error) {
+ ref, err := jsonreference.New(refURI)
+ if err != nil {
+ return Ref{}, err
+ }
+ return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
+func MustCreateRef(refURI string) Ref {
+ return Ref{Ref: jsonreference.MustCreateRef(refURI)}
+}
+
+// MarshalJSON marshals this ref into a JSON object of the form {"$ref": "..."}.
+// An empty non-root ref renders as an empty object; an empty root ref keeps
+// an explicit empty "$ref" key.
+func (r Ref) MarshalJSON() ([]byte, error) {
+ str := r.String()
+ if str == "" {
+ if r.IsRoot() {
+ return []byte(`{"$ref":""}`), nil
+ }
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$ref": str}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this ref from a JSON object
+func (r *Ref) UnmarshalJSON(d []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(d, &v); err != nil {
+ return err
+ }
+ return r.fromMap(v)
+}
+
+// GobEncode provides a safe gob encoder for Ref.
+// The ref is gob-encoded as its JSON representation — presumably because the
+// embedded jsonreference.Ref is not directly gob-encodable; confirm upstream.
+func (r Ref) GobEncode() ([]byte, error) {
+ var b bytes.Buffer
+ raw, err := r.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+ err = gob.NewEncoder(&b).Encode(raw)
+ return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Ref,
+// reversing GobEncode: gob-decode the raw JSON bytes, then unmarshal them.
+func (r *Ref) GobDecode(b []byte) error {
+ var raw []byte
+ buf := bytes.NewBuffer(b)
+ err := gob.NewDecoder(buf).Decode(&raw)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(raw, r)
+}
+
+// fromMap populates the ref from a decoded JSON object.
+// A missing or non-string "$ref" value leaves the receiver unchanged.
+func (r *Ref) fromMap(v map[string]interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ if vv, ok := v["$ref"]; ok {
+ if str, ok := vv.(string); ok {
+ ref, err := jsonreference.New(str)
+ if err != nil {
+ return err
+ }
+ *r = Ref{Ref: ref}
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go
new file mode 100644
index 00000000..47d1ee13
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/resolver.go
@@ -0,0 +1,127 @@
+package spec
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/swag"
+)
+
+// resolveAnyWithBase resolves ref against root into result (any of the
+// resolvable spec types), using the options' RelativeBase for resolution.
+// Nil options are replaced with defaults.
+func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error {
+ options = optionsOrDefault(options)
+ resolver := defaultSchemaLoader(root, options, nil, nil)
+
+ if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ResolveRefWithBase resolves a reference against a context root with preservation of base path
+func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) {
+ result := new(Schema)
+
+ if err := resolveAnyWithBase(root, ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveRef resolves a reference for a schema against a context root
+// ref is guaranteed to be in root (no need to go to external files)
+//
+// ResolveRef is ONLY called from the code generation module
+func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
+ res, _, err := ref.GetPointer().Get(root)
+ if err != nil {
+ return nil, err
+ }
+
+ // the pointer may land on a typed schema or raw decoded JSON
+ switch sch := res.(type) {
+ case Schema:
+ return &sch, nil
+ case *Schema:
+ return sch, nil
+ case map[string]interface{}:
+ // raw JSON object: re-decode into a typed Schema
+ newSch := new(Schema)
+ if err = swag.DynamicJSONToStruct(sch, newSch); err != nil {
+ return nil, err
+ }
+ return newSch, nil
+ default:
+ return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference)
+ }
+}
+
+// ResolveParameterWithBase resolves a parameter reference against a context root and base path
+func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) {
+ result := new(Parameter)
+
+ if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveParameter resolves a parameter reference against a context root
+func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
+ return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
+ result := new(Response)
+
+ err := resolveAnyWithBase(root, &ref, result, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+ return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item against a context root and base path
+func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+ result := new(PathItem)
+
+ if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolvePathItem resolves a path item against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
+func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+ return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref is forbidden in response headers.
+func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+ result := new(Items)
+
+ if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveItems resolves parameter items reference against a context root and base path.
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+ return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 00000000..0340b60d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+ Description string `json:"description"`
+ Schema *Schema `json:"schema,omitempty"`
+ Headers map[string]Header `json:"headers,omitempty"`
+ Examples map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+ Refable
+ ResponseProps
+ VendorExtensible
+}
+
+// JSONLookup look up a value by the json property name.
+// Lookup order: vendor extensions, then "$ref", then the response properties.
+func (r Response) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := r.Extensions[token]; ok {
+ return &ex, nil
+ }
+ if token == "$ref" {
+ return &r.Ref, nil
+ }
+ ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token)
+ return ptr, err
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON,
+// decoding the same payload into each embedded part in turn.
+func (r *Response) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.Refable); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &r.VendorExtensible)
+}
+
+// MarshalJSON converts this items object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+ var (
+ b1 []byte
+ err error
+ )
+
+ if r.Ref.String() == "" {
+ // when there is no $ref, empty description is rendered as an empty string
+ b1, err = json.Marshal(r.ResponseProps)
+ } else {
+ // when there is $ref inside the schema, description should be omitempty-ied
+ // NOTE(review): the literal below never populates Headers, so headers are
+ // dropped whenever $ref is set — presumably intentional ($ref siblings are
+ // ignored by consumers), but confirm against upstream go-openapi/spec.
+ b1, err = json.Marshal(struct {
+ Description string `json:"description,omitempty"`
+ Schema *Schema `json:"schema,omitempty"`
+ Headers map[string]Header `json:"headers,omitempty"`
+ Examples map[string]interface{} `json:"examples,omitempty"`
+ }{
+ Description: r.ResponseProps.Description,
+ Schema: r.ResponseProps.Schema,
+ Examples: r.ResponseProps.Examples,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ b2, err := json.Marshal(r.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ // merge the three JSON objects into one
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+ return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+ resp := NewResponse()
+ resp.Ref = MustCreateRef(url)
+ return resp
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (r *Response) WithDescription(description string) *Response {
+ r.Description = description
+ return r
+}
+
+// WithSchema sets the schema on this response, allows for chaining.
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+ r.Schema = schema
+ return r
+}
+
+// AddHeader adds a header to this response.
+// Passing a nil header removes the entry of that name instead.
+func (r *Response) AddHeader(name string, header *Header) *Response {
+ if header == nil {
+ return r.RemoveHeader(name)
+ }
+ if r.Headers == nil {
+ r.Headers = make(map[string]Header)
+ }
+ r.Headers[name] = *header
+ return r
+}
+
+// RemoveHeader removes a header from this response.
+// Deleting from a nil map is a no-op, so this is always safe to call.
+func (r *Response) RemoveHeader(name string) *Response {
+ delete(r.Headers, name)
+ return r
+}
+
+// AddExample adds an example to this response, keyed by media type.
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+ if r.Examples == nil {
+ r.Examples = make(map[string]interface{})
+ }
+ r.Examples[mediaType] = example
+ return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 00000000..16c3076f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,140 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps a HTTP response code to the expected response.
+// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected from the documentation to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+ VendorExtensible
+ ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup.
+// Lookup order: "default", then vendor extensions, then numeric status codes.
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+ if token == "default" {
+ return r.Default, nil
+ }
+ if ex, ok := r.Extensions[token]; ok {
+ return &ex, nil
+ }
+ if i, err := strconv.Atoi(token); err == nil {
+ if scr, ok := r.StatusCodeResponses[i]; ok {
+ return scr, nil
+ }
+ }
+ return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+ return err
+ }
+ // NOTE(review): this branch is effectively a no-op — it assigns the zero
+ // value only when ResponsesProps is already zero. Kept as-is for parity
+ // with the vendored upstream; confirm before removing.
+ if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+ r.ResponsesProps = ResponsesProps{}
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON,
+// merging the props and vendor-extension objects into one.
+func (r Responses) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.ResponsesProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ concated := swag.ConcatJSON(b1, b2)
+ return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It tells what is the default response and maps all responses with a
+// HTTP status code.
+type ResponsesProps struct {
+ Default *Response
+ StatusCodeResponses map[int]Response
+}
+
+// MarshalJSON marshals responses as JSON: the default response under the
+// "default" key, and each status-code response under its numeric key.
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+ toser := map[string]Response{}
+ if r.Default != nil {
+ toser["default"] = *r.Default
+ }
+ for k, v := range r.StatusCodeResponses {
+ toser[strconv.Itoa(k)] = v
+ }
+ return json.Marshal(toser)
+}
+
+// UnmarshalJSON unmarshals responses from JSON.
+// Keys prefixed with "x-" are skipped here (handled as vendor extensions);
+// other non-numeric keys are silently ignored (the Atoi error is dropped).
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+ var res map[string]json.RawMessage
+ if err := json.Unmarshal(data, &res); err != nil {
+ return err
+ }
+
+ if v, ok := res["default"]; ok {
+ var defaultRes Response
+ if err := json.Unmarshal(v, &defaultRes); err != nil {
+ return err
+ }
+ r.Default = &defaultRes
+ delete(res, "default")
+ }
+ for k, v := range res {
+ if !strings.HasPrefix(k, "x-") {
+ var statusCodeResp Response
+ if err := json.Unmarshal(v, &statusCodeResp); err != nil {
+ return err
+ }
+ if nk, err := strconv.Atoi(k); err == nil {
+ // lazily allocate the status-code map
+ if r.StatusCodeResponses == nil {
+ r.StatusCodeResponses = map[int]Response{}
+ }
+ r.StatusCodeResponses[nk] = statusCodeResp
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
new file mode 100644
index 00000000..4e9be857
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema.go
@@ -0,0 +1,645 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// BooleanProperty creates a boolean property
+func BooleanProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
+}
+
+// BoolProperty creates a boolean property
+func BoolProperty() *Schema { return BooleanProperty() }
+
+// StringProperty creates a string property
+func StringProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// CharProperty creates a string property
+func CharProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// Float64Property creates a float64/double property
+func Float64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
+}
+
+// Float32Property creates a float32/float property
+func Float32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
+}
+
+// Int8Property creates an int8 property
+func Int8Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
+}
+
+// Int16Property creates an int16 property
+func Int16Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
+}
+
+// Int32Property creates an int32 property
+func Int32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
+}
+
+// Int64Property creates an int64 property
+func Int64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
+}
+
+// StrFmtProperty creates a property for the named string format
+func StrFmtProperty(format string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
+}
+
+// DateProperty creates a date property
+func DateProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
+}
+
+// DateTimeProperty creates a date time property
+func DateTimeProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
+}
+
+// MapProperty creates a map property
+func MapProperty(property *Schema) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"object"},
+ AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
+}
+
+// RefProperty creates a ref property
+func RefProperty(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// RefSchema creates a ref property
+func RefSchema(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// ArrayProperty creates an array property
+func ArrayProperty(items *Schema) *Schema {
+ if items == nil {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
+ }
+ return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
+}
+
+// ComposedSchema creates a schema with allOf
+func ComposedSchema(schemas ...Schema) *Schema {
+ s := new(Schema)
+ s.AllOf = schemas
+ return s
+}
+
+// SchemaURL represents a schema url
+type SchemaURL string
+
+// MarshalJSON marshal this to JSON as {"$schema": "..."};
+// an empty url renders as an empty object.
+func (r SchemaURL) MarshalJSON() ([]byte, error) {
+ if r == "" {
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$schema": string(r)}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshal this from JSON
+func (r *SchemaURL) UnmarshalJSON(data []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(data, &v); err != nil {
+ return err
+ }
+ return r.fromMap(v)
+}
+
+// fromMap extracts and validates the "$schema" value from a decoded object.
+// A missing or non-string value leaves the receiver unchanged.
+func (r *SchemaURL) fromMap(v map[string]interface{}) error {
+ if v == nil {
+ return nil
+ }
+ if vv, ok := v["$schema"]; ok {
+ if str, ok := vv.(string); ok {
+ // parse to validate and normalize the url
+ u, err := parseURL(str)
+ if err != nil {
+ return err
+ }
+
+ *r = SchemaURL(u.String())
+ }
+ }
+ return nil
+}
+
+// SchemaProps describes a JSON schema (draft 4)
+type SchemaProps struct {
+ ID string `json:"id,omitempty"`
+ Ref Ref `json:"-"`
+ Schema SchemaURL `json:"-"`
+ Description string `json:"description,omitempty"`
+ Type StringOrArray `json:"type,omitempty"`
+ Nullable bool `json:"nullable,omitempty"`
+ Format string `json:"format,omitempty"`
+ Title string `json:"title,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+ MaxProperties *int64 `json:"maxProperties,omitempty"`
+ MinProperties *int64 `json:"minProperties,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Items *SchemaOrArray `json:"items,omitempty"`
+ AllOf []Schema `json:"allOf,omitempty"`
+ OneOf []Schema `json:"oneOf,omitempty"`
+ AnyOf []Schema `json:"anyOf,omitempty"`
+ Not *Schema `json:"not,omitempty"`
+ Properties SchemaProperties `json:"properties,omitempty"`
+ AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"`
+ PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+ Dependencies Dependencies `json:"dependencies,omitempty"`
+ AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"`
+ Definitions Definitions `json:"definitions,omitempty"`
+}
+
+// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4)
+type SwaggerSchemaProps struct {
+ Discriminator string `json:"discriminator,omitempty"`
+ ReadOnly bool `json:"readOnly,omitempty"`
+ XML *XMLObject `json:"xml,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+ Example interface{} `json:"example,omitempty"`
+}
+
+// Schema the schema object allows the definition of input and output data types.
+// These types can be objects, but also primitives and arrays.
+// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
+// and uses a predefined subset of it.
+// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
+//
+// For more information: http://goo.gl/8us55a#schemaObject
+type Schema struct {
+ VendorExtensible
+ SchemaProps
+ SwaggerSchemaProps
+ ExtraProps map[string]interface{} `json:"-"`
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s Schema) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ if ex, ok := s.ExtraProps[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
+ if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) {
+ return r, err
+ }
+ r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)
+ return r, err
+}
+
+// WithID sets the id for this schema, allows for chaining
+func (s *Schema) WithID(id string) *Schema {
+ s.ID = id
+ return s
+}
+
+// WithTitle sets the title for this schema, allows for chaining
+func (s *Schema) WithTitle(title string) *Schema {
+ s.Title = title
+ return s
+}
+
+// WithDescription sets the description for this schema, allows for chaining
+func (s *Schema) WithDescription(description string) *Schema {
+ s.Description = description
+ return s
+}
+
+// WithProperties sets the properties for this schema
+func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
+ s.Properties = schemas
+ return s
+}
+
+// SetProperty sets a property on this schema
+func (s *Schema) SetProperty(name string, schema Schema) *Schema {
+ if s.Properties == nil {
+ s.Properties = make(map[string]Schema)
+ }
+ s.Properties[name] = schema
+ return s
+}
+
+// WithAllOf sets the all of property
+func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
+ s.AllOf = schemas
+ return s
+}
+
+// WithMaxProperties sets the max number of properties an object can have
+func (s *Schema) WithMaxProperties(max int64) *Schema {
+ s.MaxProperties = &max
+ return s
+}
+
+// WithMinProperties sets the min number of properties an object must have
+func (s *Schema) WithMinProperties(min int64) *Schema {
+ s.MinProperties = &min
+ return s
+}
+
+// Typed sets the type of this schema for a single value item
+func (s *Schema) Typed(tpe, format string) *Schema {
+ s.Type = []string{tpe}
+ s.Format = format
+ return s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+ s.Type = append(s.Type, tpe)
+ if format != "" {
+ s.Format = format
+ }
+ return s
+}
+
+// AsNullable flags this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+ s.Nullable = true
+ return s
+}
+
+// CollectionOf a fluent builder method for an array parameter
+func (s *Schema) CollectionOf(items Schema) *Schema {
+ s.Type = []string{jsonArray}
+ s.Items = &SchemaOrArray{Schema: &items}
+ return s
+}
+
+// WithDefault sets the default value on this parameter
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+ s.Default = defaultValue
+ return s
+}
+
+// WithRequired flags this parameter as required
+func (s *Schema) WithRequired(items ...string) *Schema {
+ s.Required = items
+ return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+ s.Required = append(s.Required, items...)
+ return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+ s.MaxLength = &max
+ return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+ s.MinLength = &min
+ return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+ s.Pattern = pattern
+ return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+ s.MultipleOf = &number
+ return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+ s.Maximum = &max
+ s.ExclusiveMaximum = exclusive
+ return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+ s.Minimum = &min
+ s.ExclusiveMinimum = exclusive
+ return s
+}
+
+// WithEnum sets the enum values (replacing any previously set values)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+ // copy into a fresh slice to avoid aliasing the caller's backing array
+ s.Enum = append([]interface{}{}, values...)
+ return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+ s.MaxItems = &size
+ return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+ s.MinItems = &size
+ return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+ s.UniqueItems = true
+ return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+ s.UniqueItems = false
+ return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+ s.AllOf = append(s.AllOf, schemas...)
+ return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+ s.Discriminator = discriminator
+ return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+ s.ReadOnly = true
+ return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+ s.ReadOnly = false
+ return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+ s.Example = example
+ return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass non-empty string as one value then those values will be used on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+ if description == "" && url == "" {
+ s.ExternalDocs = nil
+ return s
+ }
+
+ if s.ExternalDocs == nil {
+ s.ExternalDocs = &ExternalDocumentation{}
+ }
+ s.ExternalDocs.Description = description
+ s.ExternalDocs.URL = url
+ return s
+}
+
+// WithXMLName sets the xml name for the object.
+// The XML metadata object is lazily allocated; the receiver is returned for chaining.
+func (s *Schema) WithXMLName(name string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Name = name
+ return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object.
+// The XML metadata object is lazily allocated; the receiver is returned for chaining.
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Namespace = namespace
+ return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object.
+// The XML metadata object is lazily allocated; the receiver is returned for chaining.
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Prefix = prefix
+ return s
+}
+
+// AsXMLAttribute flags this object as xml attribute (Attribute = true).
+func (s *Schema) AsXMLAttribute() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Attribute = true
+ return s
+}
+
+// AsXMLElement flags this object as an xml node (Attribute = false).
+func (s *Schema) AsXMLElement() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Attribute = false
+ return s
+}
+
+// AsWrappedXML flags this object as wrapped, this is mostly useful for array types
+// (Wrapped = true).
+func (s *Schema) AsWrappedXML() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Wrapped = true
+ return s
+}
+
+// AsUnwrappedXML flags this object as unwrapped (Wrapped = false),
+// the counterpart of AsWrappedXML.
+func (s *Schema) AsUnwrappedXML() *Schema {
+ if s.XML == nil {
+ s.XML = new(XMLObject)
+ }
+ s.XML.Wrapped = false
+ return s
+}
+
+// SetValidations defines all schema validations.
+//
+// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered.
+func (s *Schema) SetValidations(val SchemaValidations) {
+ // numeric validations
+ s.Maximum = val.Maximum
+ s.ExclusiveMaximum = val.ExclusiveMaximum
+ s.Minimum = val.Minimum
+ s.ExclusiveMinimum = val.ExclusiveMinimum
+ // string validations
+ s.MaxLength = val.MaxLength
+ s.MinLength = val.MinLength
+ s.Pattern = val.Pattern
+ // array validations
+ s.MaxItems = val.MaxItems
+ s.MinItems = val.MinItems
+ s.UniqueItems = val.UniqueItems
+ s.MultipleOf = val.MultipleOf
+ s.Enum = val.Enum
+ // object validations
+ s.MinProperties = val.MinProperties
+ s.MaxProperties = val.MaxProperties
+ s.PatternProperties = val.PatternProperties
+}
+
+// WithValidations is a fluent method to set schema validations.
+// It delegates to SetValidations and returns the receiver for chaining.
+func (s *Schema) WithValidations(val SchemaValidations) *Schema {
+ s.SetValidations(val)
+ return s
+}
+
+// Validations returns a clone of the validations for this schema.
+//
+// NOTE: this is a shallow copy — slice/map-valued fields (e.g. Enum,
+// PatternProperties) still share backing storage with the schema.
+func (s Schema) Validations() SchemaValidations {
+ return SchemaValidations{
+ CommonValidations: CommonValidations{
+ Maximum: s.Maximum,
+ ExclusiveMaximum: s.ExclusiveMaximum,
+ Minimum: s.Minimum,
+ ExclusiveMinimum: s.ExclusiveMinimum,
+ MaxLength: s.MaxLength,
+ MinLength: s.MinLength,
+ Pattern: s.Pattern,
+ MaxItems: s.MaxItems,
+ MinItems: s.MinItems,
+ UniqueItems: s.UniqueItems,
+ MultipleOf: s.MultipleOf,
+ Enum: s.Enum,
+ },
+ MinProperties: s.MinProperties,
+ MaxProperties: s.MaxProperties,
+ PatternProperties: s.PatternProperties,
+ }
+}
+
+// MarshalJSON marshal this to JSON.
+//
+// The schema is serialized as the merge of its constituent parts:
+// SchemaProps, vendor extensions, $ref, $schema, swagger-specific props
+// and any extra (unknown) properties captured at unmarshal time.
+func (s Schema) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("schema props %v", err)
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, fmt.Errorf("vendor props %v", err)
+ }
+ b3, err := s.Ref.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("ref prop %v", err)
+ }
+ b4, err := s.Schema.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("schema prop %v", err)
+ }
+ b5, err := json.Marshal(s.SwaggerSchemaProps)
+ if err != nil {
+ // NOTE(review): this marshals SwaggerSchemaProps, but the error is
+ // labeled "common validations" — consider aligning the message.
+ return nil, fmt.Errorf("common validations %v", err)
+ }
+ // ExtraProps holds properties not recognized by the typed structs
+ // (see UnmarshalJSON); round-trip them untouched.
+ var b6 []byte
+ if s.ExtraProps != nil {
+ jj, err := json.Marshal(s.ExtraProps)
+ if err != nil {
+ return nil, fmt.Errorf("extra props %v", err)
+ }
+ b6 = jj
+ }
+ // ConcatJSON merges the individual JSON objects into a single object.
+ return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this schema from JSON.
+//
+// The payload is decoded twice: once into the typed props structs, and once
+// into a generic map so that $ref, $schema, vendor extensions (x-*) and any
+// unknown extra properties can be captured for lossless round-tripping.
+func (s *Schema) UnmarshalJSON(data []byte) error {
+ props := struct {
+ SchemaProps
+ SwaggerSchemaProps
+ }{}
+ if err := json.Unmarshal(data, &props); err != nil {
+ return err
+ }
+
+ sch := Schema{
+ SchemaProps: props.SchemaProps,
+ SwaggerSchemaProps: props.SwaggerSchemaProps,
+ }
+
+ // second, untyped pass to pick up keys the structs do not model
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ // $ref and $schema are parsed leniently: errors are deliberately ignored
+ _ = sch.Ref.fromMap(d)
+ _ = sch.Schema.fromMap(d)
+
+ // drop every key already consumed by the typed structs above
+ delete(d, "$ref")
+ delete(d, "$schema")
+ for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+ delete(d, pn)
+ }
+
+ // whatever remains is either a vendor extension (x-*) or an extra prop
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if sch.Extensions == nil {
+ sch.Extensions = map[string]interface{}{}
+ }
+ sch.Extensions[k] = vv
+ continue
+ }
+ if sch.ExtraProps == nil {
+ sch.ExtraProps = map[string]interface{}{}
+ }
+ sch.ExtraProps[k] = vv
+ }
+
+ *s = sch
+
+ return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 00000000..0059b99a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,331 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "reflect"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// PathLoader is a function to use when loading remote refs.
+//
+// This is a package level default. It may be overridden or bypassed by
+// specifying the loader in ExpandOptions.
+//
+// NOTE: if you are using the go-openapi/loads package, it will override
+// this value with its own default (a loader to retrieve YAML documents as
+// well as JSON ones).
+//
+// The default implementation delegates to swag.LoadFromFileOrHTTP.
+var PathLoader = func(pth string) (json.RawMessage, error) {
+ data, err := swag.LoadFromFileOrHTTP(pth)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(data), nil
+}
+
+// resolverContext allows to share a context during spec processing.
+// At the moment, it just holds the index of circular references found.
+type resolverContext struct {
+ // circulars holds all visited circular references, to shortcircuit $ref resolution.
+ //
+ // This structure is privately instantiated and needs not be locked against
+ // concurrent access, unless we chose to implement a parallel spec walking.
+ circulars map[string]bool
+ // basePath is the root base path, captured from the expand options.
+ basePath string
+ // loadDoc fetches a raw JSON document from a path or URL.
+ loadDoc func(string) (json.RawMessage, error)
+ // rootID is the normalized ID of the root document, when the root schema carries an ID
+ // (see setSchemaID).
+ rootID string
+}
+
+// newResolverContext builds a fresh resolution context from the given options,
+// wiring the document loader: a loader set in ExpandOptions takes precedence
+// over the package-level PathLoader.
+func newResolverContext(options *ExpandOptions) *resolverContext {
+ expandOptions := optionsOrDefault(options)
+
+ // path loader may be overridden by options
+ var loader func(string) (json.RawMessage, error)
+ if expandOptions.PathLoader == nil {
+ loader = PathLoader
+ } else {
+ loader = expandOptions.PathLoader
+ }
+
+ return &resolverContext{
+ circulars: make(map[string]bool),
+ basePath: expandOptions.RelativeBase, // keep the root base path in context
+ loadDoc: loader,
+ }
+}
+
+// schemaLoader resolves $ref's against a root document, with caching and
+// circular-reference tracking shared through its context.
+type schemaLoader struct {
+ // root is the document against which root/fragment-only refs are resolved
+ root interface{}
+ // options drive expansion (relative base, error handling, loader)
+ options *ExpandOptions
+ // cache stores fetched and decoded documents, keyed by normalized path
+ cache ResolutionCache
+ // context is shared across resolvers spawned during a single spec walk
+ context *resolverContext
+}
+
+func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader {
+ if ref.IsRoot() || ref.HasFragmentOnly {
+ return r
+ }
+
+ baseRef := MustCreateRef(basePath)
+ currentRef := normalizeRef(&ref, basePath)
+ if strings.HasPrefix(currentRef.String(), baseRef.String()) {
+ return r
+ }
+
+ // set a new root against which to resolve
+ rootURL := currentRef.GetURL()
+ rootURL.Fragment = ""
+ root, _ := r.cache.Get(rootURL.String())
+
+ // shallow copy of resolver options to set a new RelativeBase when
+ // traversing multiple documents
+ newOptions := r.options
+ newOptions.RelativeBase = rootURL.String()
+
+ return defaultSchemaLoader(root, newOptions, r.cache, r.context)
+}
+
+// updateBasePath returns the base path to use after a possible switch to a
+// transitive resolver: when transitive differs from r, its (normalized)
+// RelativeBase wins; otherwise the incoming basePath is kept.
+func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string {
+ if transitive != r {
+ if transitive.options != nil && transitive.options.RelativeBase != "" {
+ return normalizeBase(transitive.options.RelativeBase)
+ }
+ }
+
+ return basePath
+}
+
+// resolveRef resolves ref against basePath and unmarshals the resolved
+// document (or the fragment it points at) into target, which must be a pointer.
+func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error {
+ tgt := reflect.ValueOf(target)
+ if tgt.Kind() != reflect.Ptr {
+ return ErrResolveRefNeedsAPointer
+ }
+
+ // a ref without a URL resolves to nothing: leave target untouched
+ if ref.GetURL() == nil {
+ return nil
+ }
+
+ var (
+ res interface{}
+ data interface{}
+ err error
+ )
+
+ // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means
+ // it is pointing somewhere in the root.
+ root := r.root
+ if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+ // no in-memory root available: load the document at basePath to act as root
+ if baseRef, erb := NewRef(basePath); erb == nil {
+ root, _, _, _ = r.load(baseRef.GetURL())
+ }
+ }
+
+ if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+ data = root
+ } else {
+ // remote or path-qualified ref: fetch (or get from cache) the target document
+ baseRef := normalizeRef(ref, basePath)
+ data, _, _, err = r.load(baseRef.GetURL())
+ if err != nil {
+ return err
+ }
+ }
+
+ // when the ref carries a fragment, walk the JSON pointer down to it
+ res = data
+ if ref.String() != "" {
+ res, _, err = ref.GetPointer().Get(data)
+ if err != nil {
+ return err
+ }
+ }
+ return swag.DynamicJSONToStruct(res, target)
+}
+
+// load fetches and decodes the document referenced by refURL (fragment
+// stripped), going through the resolution cache. It returns the decoded
+// document, the fetched URL (without fragment), and whether the document
+// was served from the cache.
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+ debugLog("loading schema from url: %s", refURL)
+ // fragments identify a location inside a document, not the document itself
+ toFetch := *refURL
+ toFetch.Fragment = ""
+
+ var err error
+ pth := toFetch.String()
+ normalized := normalizeBase(pth)
+ debugLog("loading doc from: %s", normalized)
+
+ data, fromCache := r.cache.Get(normalized)
+ if fromCache {
+ return data, toFetch, fromCache, nil
+ }
+
+ b, err := r.context.loadDoc(normalized)
+ if err != nil {
+ return nil, url.URL{}, false, err
+ }
+
+ var doc interface{}
+ if err := json.Unmarshal(b, &doc); err != nil {
+ return nil, url.URL{}, false, err
+ }
+ // cache the freshly decoded document for subsequent resolutions
+ r.cache.Set(normalized, doc)
+
+ // fromCache is false here: the document was freshly fetched
+ return doc, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+//
+// It relies on a private context (which needs not be locked).
+// parentRefs is the chain of normalized $ref's already traversed in the
+// current walk; a ref reappearing in that chain is a new cycle.
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
+ normalizedRef := normalizeURI(ref.String(), basePath)
+ if _, ok := r.context.circulars[normalizedRef]; ok {
+ // circular $ref has been already detected in another explored cycle
+ foundCycle = true
+ return
+ }
+ foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased
+ if foundCycle {
+ // remember the cycle so later walks shortcircuit immediately
+ r.context.circulars[normalizedRef] = true
+ }
+ return
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+//
+// Resolve is not in charge of following references: it only resolves ref by following its URL.
+//
+// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them.
+//
+// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct.
+//
+// This is a thin exported wrapper around resolveRef.
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
+ return r.resolveRef(ref, target, basePath)
+}
+
+// deref dereferences input in place, following chains of $ref recursively
+// until the ref no longer changes, a cycle is detected, or resolution fails.
+// input must be one of *Schema, *Parameter, *Response or *PathItem.
+func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error {
+ // extract the Ref field from the supported refable types
+ var ref *Ref
+ switch refable := input.(type) {
+ case *Schema:
+ ref = &refable.Ref
+ case *Parameter:
+ ref = &refable.Ref
+ case *Response:
+ ref = &refable.Ref
+ case *PathItem:
+ ref = &refable.Ref
+ default:
+ return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType)
+ }
+
+ curRef := ref.String()
+ if curRef == "" {
+ // nothing to dereference
+ return nil
+ }
+
+ normalizedRef := normalizeRef(ref, basePath)
+ normalizedBasePath := normalizedRef.RemoteURI()
+
+ // stop silently on cycles: the caller handles circular refs elsewhere
+ if r.isCircular(normalizedRef, basePath, parentRefs...) {
+ return nil
+ }
+
+ if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) {
+ return err
+ }
+
+ if ref.String() == "" || ref.String() == curRef {
+ // done with dereferencing: the ref was fully resolved or did not change
+ return nil
+ }
+
+ // the resolved object carries another $ref: follow it recursively
+ parentRefs = append(parentRefs, normalizedRef.String())
+ return r.deref(input, parentRefs, normalizedBasePath)
+}
+
+// shouldStopOnError reports whether err must abort processing. With
+// ContinueOnError set, errors are logged and processing continues.
+func (r *schemaLoader) shouldStopOnError(err error) bool {
+ if err != nil && !r.options.ContinueOnError {
+ return true
+ }
+
+ if err != nil {
+ // best-effort mode: surface the error in the log, then carry on
+ log.Println(err)
+ }
+
+ return false
+}
+
+// setSchemaID registers the $id found on target as a new resolution base and
+// caches target under it. It returns the new base path and the (possibly
+// adjusted) ref path derived from the id.
+func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) {
+ debugLog("schema has ID: %s", id)
+
+ // handling the case when id is a folder
+ // remember that basePath has to point to a file
+ var refPath string
+ if strings.HasSuffix(id, "/") {
+ // ensure this is detected as a file, not a folder
+ refPath = fmt.Sprintf("%s%s", id, "placeholder.json")
+ } else {
+ refPath = id
+ }
+
+ // updates the current base path
+ // * important: ID can be a relative path
+ // * registers target to be fetchable from the new base proposed by this id
+ newBasePath := normalizeURI(refPath, basePath)
+
+ // store found IDs for possible future reuse in $ref
+ r.cache.Set(newBasePath, target)
+
+ // the root document has an ID: all $ref relative to that ID may
+ // be rebased relative to the root document
+ if basePath == r.context.basePath {
+ debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath)
+ r.context.rootID = newBasePath
+ }
+
+ return newBasePath, refPath
+}
+
+// defaultSchemaLoader builds a schemaLoader, filling in defaults for any
+// missing option, cache or context. root may be nil; in that case refs are
+// resolved via the (possibly defaulted) RelativeBase.
+func defaultSchemaLoader(
+ root interface{},
+ expandOptions *ExpandOptions,
+ cache ResolutionCache,
+ context *resolverContext) *schemaLoader {
+
+ if expandOptions == nil {
+ expandOptions = &ExpandOptions{}
+ }
+
+ cache = cacheOrDefault(cache)
+
+ if expandOptions.RelativeBase == "" {
+ // if no relative base is provided, assume the root document
+ // contains all $ref, or at least, that the relative documents
+ // may be resolved from the current working directory.
+ expandOptions.RelativeBase = baseForRoot(root, cache)
+ }
+ debugLog("effective expander options: %#v", expandOptions)
+
+ if context == nil {
+ context = newResolverContext(expandOptions)
+ }
+
+ return &schemaLoader{
+ root: root,
+ options: expandOptions,
+ cache: cache,
+ context: context,
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
new file mode 100644
index 00000000..bcbb8474
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json
@@ -0,0 +1,149 @@
+{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "$schema": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
new file mode 100644
index 00000000..ebe10ed3
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json
@@ -0,0 +1,1607 @@
+{
+ "title": "A JSON Schema for Swagger 2.0 API.",
+ "id": "http://swagger.io/v2/schema.json#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "required": [
+ "swagger",
+ "info",
+ "paths"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "swagger": {
+ "type": "string",
+ "enum": [
+ "2.0"
+ ],
+ "description": "The Swagger version of this document."
+ },
+ "info": {
+ "$ref": "#/definitions/info"
+ },
+ "host": {
+ "type": "string",
+ "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+ "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+ },
+ "basePath": {
+ "type": "string",
+ "pattern": "^/",
+ "description": "The base path to the API. Example: '/api'."
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "consumes": {
+ "description": "A list of MIME types accepted by the API.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "paths": {
+ "$ref": "#/definitions/paths"
+ },
+ "definitions": {
+ "$ref": "#/definitions/definitions"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parameterDefinitions"
+ },
+ "responses": {
+ "$ref": "#/definitions/responseDefinitions"
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ },
+ "securityDefinitions": {
+ "$ref": "#/definitions/securityDefinitions"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/tag"
+ },
+ "uniqueItems": true
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "definitions": {
+ "info": {
+ "type": "object",
+ "description": "General information about the API.",
+ "required": [
+ "version",
+ "title"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "title": {
+ "type": "string",
+ "description": "A unique and precise title of the API."
+ },
+ "version": {
+ "type": "string",
+ "description": "A semantic version number of the API."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed."
+ },
+ "termsOfService": {
+ "type": "string",
+ "description": "The terms of service for the API."
+ },
+ "contact": {
+ "$ref": "#/definitions/contact"
+ },
+ "license": {
+ "$ref": "#/definitions/license"
+ }
+ }
+ },
+ "contact": {
+ "type": "object",
+ "description": "Contact information for the owners of the API.",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the contact person/organization."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the contact information.",
+ "format": "uri"
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address of the contact person/organization.",
+ "format": "email"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "license": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the license.",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "paths": {
+ "type": "object",
+ "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ },
+ "^/": {
+ "$ref": "#/definitions/pathItem"
+ }
+ },
+ "additionalProperties": false
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+ },
+ "parameterDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/parameter"
+ },
+ "description": "One or more JSON representations for parameters"
+ },
+ "responseDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/response"
+ },
+ "description": "One or more JSON representations for responses"
+ },
+ "externalDocs": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "information about external documentation",
+ "required": [
+ "url"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "examples": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "mimeType": {
+ "type": "string",
+ "description": "The MIME type of the HTTP message."
+ },
+ "operation": {
+ "type": "object",
+ "required": [
+ "responses"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "summary": {
+ "type": "string",
+ "description": "A brief summary of the operation."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "operationId": {
+ "type": "string",
+ "description": "A unique identifier of the operation."
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "consumes": {
+ "description": "A list of MIME types the API can consume.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ },
+ "responses": {
+ "$ref": "#/definitions/responses"
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ }
+ }
+ },
+ "pathItem": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "get": {
+ "$ref": "#/definitions/operation"
+ },
+ "put": {
+ "$ref": "#/definitions/operation"
+ },
+ "post": {
+ "$ref": "#/definitions/operation"
+ },
+ "delete": {
+ "$ref": "#/definitions/operation"
+ },
+ "options": {
+ "$ref": "#/definitions/operation"
+ },
+ "head": {
+ "$ref": "#/definitions/operation"
+ },
+ "patch": {
+ "$ref": "#/definitions/operation"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ }
+ }
+ },
+ "responses": {
+ "type": "object",
+ "description": "Response objects names can either be any valid HTTP status code or 'default'.",
+ "minProperties": 1,
+ "additionalProperties": false,
+ "patternProperties": {
+ "^([0-9]{3})$|^(default)$": {
+ "$ref": "#/definitions/responseValue"
+ },
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "not": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ }
+ },
+ "responseValue": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/response"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "response": {
+ "type": "object",
+ "required": [
+ "description"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "$ref": "#/definitions/fileSchema"
+ }
+ ]
+ },
+ "headers": {
+ "$ref": "#/definitions/headers"
+ },
+ "examples": {
+ "$ref": "#/definitions/examples"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "headers": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/header"
+ }
+ },
+ "header": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "vendorExtension": {
+ "description": "Any property starting with x- is valid.",
+ "additionalProperties": true,
+ "additionalItems": true
+ },
+ "bodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "schema"
+ ],
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "body"
+ ]
+ },
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "schema": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "additionalProperties": false
+ },
+ "headerParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "header"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "queryParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "formDataParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "formData"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array",
+ "file"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "pathParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "required"
+ ],
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "enum": [
+ true
+ ],
+ "description": "Determines whether or not this parameter is required or optional."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "path"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "nonBodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "type"
+ ],
+ "oneOf": [
+ {
+ "$ref": "#/definitions/headerParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/formDataParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/queryParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/pathParameterSubSchema"
+ }
+ ]
+ },
+ "parameter": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/bodyParameter"
+ },
+ {
+ "$ref": "#/definitions/nonBodyParameter"
+ }
+ ]
+ },
+ "schema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "maxProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "boolean"
+ }
+ ],
+ "default": {}
+ },
+ "type": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "default": {}
+ },
+ "discriminator": {
+ "type": "string"
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "xml": {
+ "$ref": "#/definitions/xml"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "fileSchema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "file"
+ ]
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "primitivesItems": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "security": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/securityRequirement"
+ },
+ "uniqueItems": true
+ },
+ "securityRequirement": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ },
+ "xml": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "prefix": {
+ "type": "string"
+ },
+ "attribute": {
+ "type": "boolean",
+ "default": false
+ },
+ "wrapped": {
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "securityDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/basicAuthenticationSecurity"
+ },
+ {
+ "$ref": "#/definitions/apiKeySecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ImplicitSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2PasswordSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ApplicationSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2AccessCodeSecurity"
+ }
+ ]
+ }
+ },
+ "basicAuthenticationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "basic"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "apiKeySecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "name",
+ "in"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "apiKey"
+ ]
+ },
+ "name": {
+ "type": "string"
+ },
+ "in": {
+ "type": "string",
+ "enum": [
+ "header",
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ImplicitSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "implicit"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2PasswordSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "password"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ApplicationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "application"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2AccessCodeSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "accessCode"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2Scopes": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "mediaTypeList": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/mimeType"
+ },
+ "uniqueItems": true
+ },
+ "parametersList": {
+ "type": "array",
+ "description": "The parameters needed to send a valid API call.",
+ "additionalItems": false,
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/parameter"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+ "schemesList": {
+ "type": "array",
+ "description": "The transfer protocol of the API.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "http",
+ "https",
+ "ws",
+ "wss"
+ ]
+ },
+ "uniqueItems": true
+ },
+ "collectionFormat": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes"
+ ],
+ "default": "csv"
+ },
+ "collectionFormatWithMulti": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes",
+ "multi"
+ ],
+ "default": "csv"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "jsonReference": {
+ "type": "object",
+ "required": [
+ "$ref"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "$ref": {
+ "type": "string"
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go
new file mode 100644
index 00000000..9d0bdae9
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/security_scheme.go
@@ -0,0 +1,170 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+const (
+ basic = "basic"
+ apiKey = "apiKey"
+ oauth2 = "oauth2"
+ implicit = "implicit"
+ password = "password"
+ application = "application"
+ accessCode = "accessCode"
+)
+
+// BasicAuth creates a basic auth security scheme
+func BasicAuth() *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}}
+}
+
+// APIKeyAuth creates an api key auth security scheme
+func APIKeyAuth(fieldName, valueSource string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}}
+}
+
+// OAuth2Implicit creates an implicit flow oauth2 security scheme
+func OAuth2Implicit(authorizationURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: implicit,
+ AuthorizationURL: authorizationURL,
+ }}
+}
+
+// OAuth2Password creates a password flow oauth2 security scheme
+func OAuth2Password(tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: password,
+ TokenURL: tokenURL,
+ }}
+}
+
+// OAuth2Application creates an application flow oauth2 security scheme
+func OAuth2Application(tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: application,
+ TokenURL: tokenURL,
+ }}
+}
+
+// OAuth2AccessToken creates an access token flow oauth2 security scheme
+func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme {
+ return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+ Type: oauth2,
+ Flow: accessCode,
+ AuthorizationURL: authorizationURL,
+ TokenURL: tokenURL,
+ }}
+}
+
+// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section
+type SecuritySchemeProps struct {
+ Description string `json:"description,omitempty"`
+ Type string `json:"type"`
+ Name string `json:"name,omitempty"` // api key
+ In string `json:"in,omitempty"` // api key
+ Flow string `json:"flow,omitempty"` // oauth2
+ AuthorizationURL string `json:"authorizationUrl"` // oauth2
+ TokenURL string `json:"tokenUrl,omitempty"` // oauth2
+ Scopes map[string]string `json:"scopes,omitempty"` // oauth2
+}
+
+// AddScope adds a scope to this security scheme
+func (s *SecuritySchemeProps) AddScope(scope, description string) {
+ if s.Scopes == nil {
+ s.Scopes = make(map[string]string)
+ }
+ s.Scopes[scope] = description
+}
+
+// SecurityScheme allows the definition of a security scheme that can be used by the operations.
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter)
+// and OAuth2's common flows (implicit, password, application and access code).
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+ VendorExtensible
+ SecuritySchemeProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
+ return r, err
+}
+
+// MarshalJSON marshal this to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+ var (
+ b1 []byte
+ err error
+ )
+
+ if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") {
+ // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string
+ b1, err = json.Marshal(s.SecuritySchemeProps)
+ } else {
+ // when not oauth2, empty AuthorizationURL should be omitted
+ b1, err = json.Marshal(struct {
+ Description string `json:"description,omitempty"`
+ Type string `json:"type"`
+ Name string `json:"name,omitempty"` // api key
+ In string `json:"in,omitempty"` // api key
+ Flow string `json:"flow,omitempty"` // oauth2
+ AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2
+ TokenURL string `json:"tokenUrl,omitempty"` // oauth2
+ Scopes map[string]string `json:"scopes,omitempty"` // oauth2
+ }{
+ Description: s.Description,
+ Type: s.Type,
+ Name: s.Name,
+ In: s.In,
+ Flow: s.Flow,
+ AuthorizationURL: s.AuthorizationURL,
+ TokenURL: s.TokenURL,
+ Scopes: s.Scopes,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON marshal this from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &s.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
new file mode 100644
index 00000000..876aa127
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -0,0 +1,78 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+)
+
+//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json
+//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema
+//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+ // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+ SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+ // JSONSchemaURL the url for the json schema
+ JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+ d, e := JSONSchemaDraft04()
+ if e != nil {
+ panic(e)
+ }
+ return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json shema draft04
+func JSONSchemaDraft04() (*Schema, error) {
+ b, err := jsonschemaDraft04JSONBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ schema := new(Schema)
+ if err := json.Unmarshal(b, schema); err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+ d, e := Swagger20Schema()
+ if e != nil {
+ panic(e)
+ }
+ return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+ b, err := v2SchemaJSONBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ schema := new(Schema)
+ if err := json.Unmarshal(b, schema); err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 00000000..1590fd17
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// Swagger this is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
+//
+// For more information: http://goo.gl/8us55a#swagger-object-
+type Swagger struct {
+ VendorExtensible
+ SwaggerProps
+}
+
+// JSONLookup look up a value by the json property name
+func (s Swagger) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := s.Extensions[token]; ok {
+ return &ex, nil
+ }
+ r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this swagger structure to json
+func (s Swagger) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SwaggerProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals a swagger spec from json
+func (s *Swagger) UnmarshalJSON(data []byte) error {
+ var sw Swagger
+ if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
+ return err
+ }
+ *s = sw
+ return nil
+}
+
+// GobEncode provides a safe gob encoder for Swagger, including extensions
+func (s Swagger) GobEncode() ([]byte, error) {
+ var b bytes.Buffer
+ raw := struct {
+ Props SwaggerProps
+ Ext VendorExtensible
+ }{
+ Props: s.SwaggerProps,
+ Ext: s.VendorExtensible,
+ }
+ err := gob.NewEncoder(&b).Encode(raw)
+ return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Swagger, including extensions
+func (s *Swagger) GobDecode(b []byte) error {
+ var raw struct {
+ Props SwaggerProps
+ Ext VendorExtensible
+ }
+ buf := bytes.NewBuffer(b)
+ err := gob.NewDecoder(buf).Decode(&raw)
+ if err != nil {
+ return err
+ }
+ s.SwaggerProps = raw.Props
+ s.VendorExtensible = raw.Ext
+ return nil
+}
+
+// SwaggerProps captures the top-level properties of an Api specification
+//
+// NOTE: validation rules
+// - the scheme, when present must be from [http, https, ws, wss]
+// - BasePath must start with a leading "/"
+// - Paths is required
+type SwaggerProps struct {
+ ID string `json:"id,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Schemes []string `json:"schemes,omitempty"`
+ Swagger string `json:"swagger,omitempty"`
+ Info *Info `json:"info,omitempty"`
+ Host string `json:"host,omitempty"`
+ BasePath string `json:"basePath,omitempty"`
+ Paths *Paths `json:"paths"`
+ Definitions Definitions `json:"definitions,omitempty"`
+ Parameters map[string]Parameter `json:"parameters,omitempty"`
+ Responses map[string]Response `json:"responses,omitempty"`
+ SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"`
+ Security []map[string][]string `json:"security,omitempty"`
+ Tags []Tag `json:"tags,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+type swaggerPropsAlias SwaggerProps
+
+type gobSwaggerPropsAlias struct {
+ Security []map[string]struct {
+ List []string
+ Pad bool
+ }
+ Alias *swaggerPropsAlias
+ SecurityIsEmpty bool
+}
+
+// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements
+func (o SwaggerProps) GobEncode() ([]byte, error) {
+ raw := gobSwaggerPropsAlias{
+ Alias: (*swaggerPropsAlias)(&o),
+ }
+
+ var b bytes.Buffer
+ if o.Security == nil {
+ // nil security requirement
+ err := gob.NewEncoder(&b).Encode(raw)
+ return b.Bytes(), err
+ }
+
+ if len(o.Security) == 0 {
+ // empty, but non-nil security requirement
+ raw.SecurityIsEmpty = true
+ raw.Alias.Security = nil
+ err := gob.NewEncoder(&b).Encode(raw)
+ return b.Bytes(), err
+ }
+
+ raw.Security = make([]map[string]struct {
+ List []string
+ Pad bool
+ }, 0, len(o.Security))
+ for _, req := range o.Security {
+ v := make(map[string]struct {
+ List []string
+ Pad bool
+ }, len(req))
+ for k, val := range req {
+ v[k] = struct {
+ List []string
+ Pad bool
+ }{
+ List: val,
+ }
+ }
+ raw.Security = append(raw.Security, v)
+ }
+
+ err := gob.NewEncoder(&b).Encode(raw)
+ return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements
+func (o *SwaggerProps) GobDecode(b []byte) error {
+ var raw gobSwaggerPropsAlias
+
+ buf := bytes.NewBuffer(b)
+ err := gob.NewDecoder(buf).Decode(&raw)
+ if err != nil {
+ return err
+ }
+ if raw.Alias == nil {
+ return nil
+ }
+
+ switch {
+ case raw.SecurityIsEmpty:
+ // empty, but non-nil security requirement
+ raw.Alias.Security = []map[string][]string{}
+ case len(raw.Alias.Security) == 0:
+ // nil security requirement
+ raw.Alias.Security = nil
+ default:
+ raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+ for _, req := range raw.Security {
+ v := make(map[string][]string, len(req))
+ for k, val := range req {
+ v[k] = make([]string, 0, len(val.List))
+ v[k] = append(v[k], val.List...)
+ }
+ raw.Alias.Security = append(raw.Alias.Security, v)
+ }
+ }
+
+ *o = *(*SwaggerProps)(raw.Alias)
+ return nil
+}
+
+// Dependencies represent a dependencies property
+type Dependencies map[string]SchemaOrStringArray
+
+// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property
+type SchemaOrBool struct {
+ Allows bool
+ Schema *Schema
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) {
+ if token == "allows" {
+ return s.Allows, nil
+ }
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+var jsTrue = []byte("true")
+var jsFalse = []byte("false")
+
+// MarshalJSON convert this object to JSON
+func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+
+ if s.Schema == nil && !s.Allows {
+ return jsFalse, nil
+ }
+ return jsTrue, nil
+}
+
+// UnmarshalJSON converts this bool or schema object from a JSON structure
+func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrBool
+ if len(data) > 0 {
+ if data[0] == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ nw.Allows = !bytes.Equal(data, []byte("false"))
+ }
+ *s = nw
+ return nil
+}
+
+// SchemaOrStringArray represents a schema or a string array
+type SchemaOrStringArray struct {
+ Schema *Schema
+ Property []string
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) {
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
+ if len(s.Property) > 0 {
+ return json.Marshal(s.Property)
+ }
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+ return []byte("null"), nil
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ var nw SchemaOrStringArray
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Property); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
+
+// Definitions contains the models explicitly defined in this spec
+// An object to hold data types that can be consumed and produced by operations.
+// These data types can be primitives, arrays or models.
+//
+// For more information: http://goo.gl/8us55a#definitionsObject
+type Definitions map[string]Schema
+
+// SecurityDefinitions a declaration of the security schemes available to be used in the specification.
+// This does not enforce the security schemes on the operations and only serves to provide
+// the relevant details for each scheme.
+//
+// For more information: http://goo.gl/8us55a#securityDefinitionsObject
+type SecurityDefinitions map[string]*SecurityScheme
+
+// StringOrArray represents a value that can either be a string
+// or an array of strings. Mainly here for serialization purposes
+type StringOrArray []string
+
+// Contains returns true when the value is contained in the slice
+func (s StringOrArray) Contains(value string) bool {
+ for _, str := range s {
+ if str == value {
+ return true
+ }
+ }
+ return false
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) {
+ if _, err := strconv.Atoi(token); err == nil {
+ r, _, err := jsonpointer.GetForToken(s.Schemas, token)
+ return r, err
+ }
+ r, _, err := jsonpointer.GetForToken(s.Schema, token)
+ return r, err
+}
+
+// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
+func (s *StringOrArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+
+ if first == '[' {
+ var parsed []string
+ if err := json.Unmarshal(data, &parsed); err != nil {
+ return err
+ }
+ *s = StringOrArray(parsed)
+ return nil
+ }
+
+ var single interface{}
+ if err := json.Unmarshal(data, &single); err != nil {
+ return err
+ }
+ if single == nil {
+ return nil
+ }
+ switch v := single.(type) {
+ case string:
+ *s = StringOrArray([]string{v})
+ return nil
+ default:
+ return fmt.Errorf("only string or array is allowed, not %T", single)
+ }
+}
+
+// MarshalJSON converts this string or array to a JSON array or JSON string
+func (s StringOrArray) MarshalJSON() ([]byte, error) {
+ if len(s) == 1 {
+ return json.Marshal([]string(s)[0])
+ }
+ return json.Marshal([]string(s))
+}
+
+// SchemaOrArray represents a value that can either be a Schema
+// or an array of Schema. Mainly here for serialization purposes
+type SchemaOrArray struct {
+ Schema *Schema
+ Schemas []Schema
+}
+
+// Len returns the number of schemas in this property
+func (s SchemaOrArray) Len() int {
+ if s.Schema != nil {
+ return 1
+ }
+ return len(s.Schemas)
+}
+
+// ContainsType returns true when one of the schemas is of the specified type
+func (s *SchemaOrArray) ContainsType(name string) bool {
+ if s.Schema != nil {
+ return s.Schema.Type != nil && s.Schema.Type.Contains(name)
+ }
+ return false
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
+ if len(s.Schemas) > 0 {
+ return json.Marshal(s.Schemas)
+ }
+ return json.Marshal(s.Schema)
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrArray
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Schemas); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
+
+// vim:set ft=go noet sts=2 sw=2 ts=2:
diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go
new file mode 100644
index 00000000..faa3d3de
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/tag.go
@@ -0,0 +1,75 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/swag"
+)
+
+// TagProps describe a tag entry in the top level tags section of a swagger spec
+type TagProps struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"name,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// NewTag creates a new tag
+func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag {
+ return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}}
+}
+
+// Tag allows adding meta data to a single tag that is used by the
+// [Operation Object](http://goo.gl/8us55a#operationObject).
+// It is not mandatory to have a Tag Object per tag used there.
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+ VendorExtensible
+ TagProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (t Tag) JSONLookup(token string) (interface{}, error) {
+ if ex, ok := t.Extensions[token]; ok {
+ return &ex, nil
+ }
+
+ r, _, err := jsonpointer.GetForToken(t.TagProps, token)
+ return r, err
+}
+
+// MarshalJSON marshals this tag to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(t.TagProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(t.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this tag from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &t.TagProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &t.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go
new file mode 100644
index 00000000..5bdfe40b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/url_go19.go
@@ -0,0 +1,11 @@
+package spec
+
+import "net/url"
+
+func parseURL(s string) (*url.URL, error) {
+ u, err := url.Parse(s)
+ if err == nil {
+ u.OmitHost = false
+ }
+ return u, err
+}
diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go
new file mode 100644
index 00000000..6360a8ea
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/validations.go
@@ -0,0 +1,215 @@
+package spec
+
+// CommonValidations describe common JSON-schema validations
+type CommonValidations struct {
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+}
+
+// SetValidations defines all validations for a simple schema.
+//
+// NOTE: the input is the larger set of validations available for schemas.
+// For simple schemas, MinProperties and MaxProperties are ignored.
+func (v *CommonValidations) SetValidations(val SchemaValidations) {
+ v.Maximum = val.Maximum
+ v.ExclusiveMaximum = val.ExclusiveMaximum
+ v.Minimum = val.Minimum
+ v.ExclusiveMinimum = val.ExclusiveMinimum
+ v.MaxLength = val.MaxLength
+ v.MinLength = val.MinLength
+ v.Pattern = val.Pattern
+ v.MaxItems = val.MaxItems
+ v.MinItems = val.MinItems
+ v.UniqueItems = val.UniqueItems
+ v.MultipleOf = val.MultipleOf
+ v.Enum = val.Enum
+}
+
+type clearedValidation struct {
+ Validation string
+ Value interface{}
+}
+
+type clearedValidations []clearedValidation
+
+func (c clearedValidations) apply(cbs []func(string, interface{})) {
+ for _, cb := range cbs {
+ for _, cleared := range c {
+ cb(cleared.Validation, cleared.Value)
+ }
+ }
+}
+
+// ClearNumberValidations clears all number validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) {
+ done := make(clearedValidations, 0, 5)
+ defer func() {
+ done.apply(cbs)
+ }()
+
+ if v.Minimum != nil {
+ done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum})
+ v.Minimum = nil
+ }
+ if v.Maximum != nil {
+ done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum})
+ v.Maximum = nil
+ }
+ if v.ExclusiveMaximum {
+ done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum})
+ v.ExclusiveMaximum = false
+ }
+ if v.ExclusiveMinimum {
+ done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum})
+ v.ExclusiveMinimum = false
+ }
+ if v.MultipleOf != nil {
+ done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf})
+ v.MultipleOf = nil
+ }
+}
+
+// ClearStringValidations clears all string validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) {
+ done := make(clearedValidations, 0, 3)
+ defer func() {
+ done.apply(cbs)
+ }()
+
+ if v.Pattern != "" {
+ done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern})
+ v.Pattern = ""
+ }
+ if v.MinLength != nil {
+ done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength})
+ v.MinLength = nil
+ }
+ if v.MaxLength != nil {
+ done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength})
+ v.MaxLength = nil
+ }
+}
+
+// ClearArrayValidations clears all array validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) {
+ done := make(clearedValidations, 0, 3)
+ defer func() {
+ done.apply(cbs)
+ }()
+
+ if v.MaxItems != nil {
+ done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems})
+ v.MaxItems = nil
+ }
+ if v.MinItems != nil {
+ done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems})
+ v.MinItems = nil
+ }
+ if v.UniqueItems {
+ done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems})
+ v.UniqueItems = false
+ }
+}
+
+// Validations returns a clone of the validations for a simple schema.
+//
+// NOTE: in the context of simple schema objects, MinProperties, MaxProperties
+// and PatternProperties remain unset.
+func (v CommonValidations) Validations() SchemaValidations {
+ return SchemaValidations{
+ CommonValidations: v,
+ }
+}
+
+// HasNumberValidations indicates if the validations are for numbers or integers
+func (v CommonValidations) HasNumberValidations() bool {
+ return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil
+}
+
+// HasStringValidations indicates if the validations are for strings
+func (v CommonValidations) HasStringValidations() bool {
+ return v.MaxLength != nil || v.MinLength != nil || v.Pattern != ""
+}
+
+// HasArrayValidations indicates if the validations are for arrays
+func (v CommonValidations) HasArrayValidations() bool {
+ return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems
+}
+
+// HasEnum indicates if the validation includes some enum constraint
+func (v CommonValidations) HasEnum() bool {
+ return len(v.Enum) > 0
+}
+
+// SchemaValidations describes the validation properties of a schema
+//
+// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change
+// in the exported members: all initializers using literals would fail.
+type SchemaValidations struct {
+ CommonValidations
+
+ PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+ MaxProperties *int64 `json:"maxProperties,omitempty"`
+ MinProperties *int64 `json:"minProperties,omitempty"`
+}
+
+// HasObjectValidations indicates if the validations are for objects
+func (v SchemaValidations) HasObjectValidations() bool {
+ return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil
+}
+
+// SetValidations for schema validations
+func (v *SchemaValidations) SetValidations(val SchemaValidations) {
+ v.CommonValidations.SetValidations(val)
+ v.PatternProperties = val.PatternProperties
+ v.MaxProperties = val.MaxProperties
+ v.MinProperties = val.MinProperties
+}
+
+// Validations for a schema
+func (v SchemaValidations) Validations() SchemaValidations {
+ val := v.CommonValidations.Validations()
+ val.PatternProperties = v.PatternProperties
+ val.MinProperties = v.MinProperties
+ val.MaxProperties = v.MaxProperties
+ return val
+}
+
+// ClearObjectValidations returns a clone of the validations with all object validations cleared.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) {
+ done := make(clearedValidations, 0, 3)
+ defer func() {
+ done.apply(cbs)
+ }()
+
+ if v.MaxProperties != nil {
+ done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties})
+ v.MaxProperties = nil
+ }
+ if v.MinProperties != nil {
+ done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties})
+ v.MinProperties = nil
+ }
+ if v.PatternProperties != nil {
+ done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties})
+ v.PatternProperties = nil
+ }
+}
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 00000000..945a4670
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+ Name string `json:"name,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Prefix string `json:"prefix,omitempty"`
+ Attribute bool `json:"attribute,omitempty"`
+ Wrapped bool `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+ x.Name = name
+ return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+ x.Namespace = namespace
+ return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+ x.Prefix = prefix
+ return x
+}
+
+// AsAttribute flags this object as xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+ x.Attribute = true
+ return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+ x.Attribute = false
+ return x
+}
+
+// AsWrapped flags this object as wrapped, this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+ x.Wrapped = true
+ return x
+}
+
+// AsUnwrapped flags this object as unwrapped, this is mostly useful for array types
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+ x.Wrapped = false
+ return x
+}
diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes
new file mode 100644
index 00000000..d020be8e
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitattributes
@@ -0,0 +1,2 @@
+*.go text eol=lf
+
diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore
new file mode 100644
index 00000000..dd91ed6a
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 00000000..f6b39c6c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,87 @@
+# Strfmt [](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/strfmt)
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[](http://godoc.org/github.com/go-openapi/strfmt)
+[](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions to go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+ - date-time
+ - email
+ - hostname
+ - ipv4
+ - ipv6
+ - uri
+- [x] swagger 2.0 format extensions
+ - binary
+ - byte (e.g. base64 encoded string)
+ - date (e.g. "1970-01-01")
+ - password
+- [x] go-openapi custom format extensions
+ - bsonobjectid (BSON objectID)
+ - creditcard
+ - duration (e.g. "3 weeks", "1ms")
+ - hexcolor (e.g. "#FFFFFF")
+ - isbn, isbn10, isbn13
+ - mac (e.g "01:02:03:04:05:06")
+ - rgbcolor (e.g. "rgb(100,100,100)")
+ - ssn
+ - uuid, uuid3, uuid4, uuid5
+ - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+ - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec))
+
+> NOTE: as the name stands for, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with swagger format extension for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Type conversion
+
+All types defined here are stringers and may be converted to strings with `.String()`.
+Note that most types defined by this package may be converted directly to string like `string(Email{})`.
+
+`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`.
+Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`
+
+## Using pointers
+
+The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does
+with primitive types.
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
+- [ULID](https://github.com/ulid/spec)
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 00000000..cfa9a526
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "database/sql/driver"
+ "fmt"
+
+ "go.mongodb.org/mongo-driver/bson"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+ bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+ var id ObjectId
+ // register this format in the default registry
+ Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+ _, err := bsonprim.ObjectIDFromHex(str)
+ return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
+
+// NewObjectId creates a ObjectId from a Hex String
+func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
+ oid, err := bsonprim.ObjectIDFromHex(hex)
+ if err != nil {
+ panic(err)
+ }
+ return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+ oid := bsonprim.ObjectID(id)
+ if oid == bsonprim.NilObjectID {
+ return nil, nil
+ }
+ return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+ if len(data) == 0 {
+ *id = ObjectId(bsonprim.NilObjectID)
+ return nil
+ }
+ oidstr := string(data)
+ oid, err := bsonprim.ObjectIDFromHex(oidstr)
+ if err != nil {
+ return err
+ }
+ *id = ObjectId(oid)
+ return nil
+}
+
+// Scan read a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+ var data []byte
+ switch v := raw.(type) {
+ case []byte:
+ data = v
+ case string:
+ data = []byte(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v)
+ }
+
+ return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+ return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+func (id ObjectId) String() string {
+ return bsonprim.ObjectID(id).Hex()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+ return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+ var obj bsonprim.ObjectID
+ if err := obj.UnmarshalJSON(data); err != nil {
+ return err
+ }
+ *id = ObjectId(obj)
+ return nil
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+ var obj struct {
+ Data bsonprim.ObjectID
+ }
+ if err := bson.Unmarshal(data, &obj); err != nil {
+ return err
+ }
+ *id = ObjectId(obj.Data)
+ return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+ oid := bsonprim.ObjectID(id)
+ return bson.TypeObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
+ var oid bsonprim.ObjectID
+ copy(oid[:], data)
+ *id = ObjectId(oid)
+ return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+ *out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+ if id == nil {
+ return nil
+ }
+ out := new(ObjectId)
+ id.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 00000000..3c93381c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,187 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+ d := Date{}
+ // register this format in the default registry
+ Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+ _, err := time.Parse(RFC3339FullDate, str)
+ return err == nil
+}
+
+const (
+ // RFC3339FullDate represents a full-date as specified by RFC3339
+ // See: http://goo.gl/xXOvVd
+ RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+ return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+ if len(text) == 0 {
+ return nil
+ }
+ dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
+ if err != nil {
+ return err
+ }
+ *d = Date(dd)
+ return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from database driver type.
+func (d *Date) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ return d.UnmarshalText(v)
+ case string:
+ return d.UnmarshalText([]byte(v))
+ case time.Time:
+ *d = Date(v)
+ return nil
+ case nil:
+ *d = Date{}
+ return nil
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+ }
+}
+
+// Value converts Date to a primitive value ready to written to a database.
+func (d Date) Value() (driver.Value, error) {
+ return driver.Value(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+ return json.Marshal(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var strdate string
+ if err := json.Unmarshal(data, &strdate); err != nil {
+ return err
+ }
+ tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation)
+ if err != nil {
+ return err
+ }
+ *d = Date(tt)
+ return nil
+}
+
+func (d Date) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": d.String()})
+}
+
+func (d *Date) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if data, ok := m["data"].(string); ok {
+ rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
+ if err != nil {
+ return err
+ }
+ *d = Date(rd)
+ return nil
+ }
+
+ return errors.New("couldn't unmarshal bson bytes value as Date")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Date) DeepCopyInto(out *Date) {
+ *out = *d
+}
+
+// DeepCopy copies the receiver into a new Date.
+func (d *Date) DeepCopy() *Date {
+ if d == nil {
+ return nil
+ }
+ out := new(Date)
+ d.DeepCopyInto(out)
+ return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (d Date) GobEncode() ([]byte, error) {
+ return d.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (d *Date) GobDecode(data []byte) error {
+ return d.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d Date) MarshalBinary() ([]byte, error) {
+ return time.Time(d).MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Date) UnmarshalBinary(data []byte) error {
+ var original time.Time
+
+ err := original.UnmarshalBinary(data)
+ if err != nil {
+ return err
+ }
+
+ *d = Date(original)
+
+ return nil
+}
+
+// Equal checks if two Date instances are equal
+func (d Date) Equal(d2 Date) bool {
+ return time.Time(d).Equal(time.Time(d2))
+}
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
new file mode 100644
index 00000000..28137140
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -0,0 +1,2051 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "database/sql/driver"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/mail"
+ "regexp"
+ "strings"
+
+ "github.com/asaskevich/govalidator"
+ "github.com/google/uuid"
+ "go.mongodb.org/mongo-driver/bson"
+)
+
+const (
+ // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+ // A string instance is valid against this attribute if it is a valid
+ // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+ // http://tools.ietf.org/html/rfc1034#section-3.5
+ // ::= any one of the ten digits 0 through 9
+ // var digit = /[0-9]/;
+ // ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+ // var letter = /[a-zA-Z]/;
+ // ::= |
+ // var letDig = /[0-9a-zA-Z]/;
+ // ::= | "-"
+ // var letDigHyp = /[-0-9a-zA-Z]/;
+ // ::= |
+ // var ldhStr = /[-0-9a-zA-Z]+/;
+ // ::= [ [ ] ]
+ // var label = /[a-zA-Z](([-0-9a-zA-Z]+)?[0-9a-zA-Z])?/;
+ // ::= | "."
+ // var subdomain = /^[a-zA-Z](([-0-9a-zA-Z]+)?[0-9a-zA-Z])?(\.[a-zA-Z](([-0-9a-zA-Z]+)?[0-9a-zA-Z])?)*$/;
+ // ::= | " "
+ //
+ // Additional validations:
+ // - for FDQNs, top-level domain (e.g. ".com"), is at least to letters long (no special characters here)
+ // - hostnames may start with a digit [RFC1123]
+ // - special registered names with an underscore ('_') are not allowed in this context
+ // - dashes are permitted, but not at the start or the end of a segment
+ // - long top-level domain names (e.g. example.london) are permitted
+ // - symbol unicode points are permitted (e.g. emoji) (not for top-level domain)
+ HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$`
+
+ // json null type
+ jsonNull = "null"
+)
+
+const (
+ // UUIDPattern Regex for UUID that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)`
+
+ // UUID3Pattern Regex for UUID3 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)`
+
+ // UUID4Pattern Regex for UUID4 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
+
+ // UUID5Pattern Regex for UUID5 that allows uppercase
+ //
+ // Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
+ UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
+)
+
+var (
+ rxHostname = regexp.MustCompile(HostnamePattern)
+)
+
+// IsHostname returns true when the string is a valid hostname
+func IsHostname(str string) bool {
+ if !rxHostname.MatchString(str) {
+ return false
+ }
+
+ // the sum of all label octets and label lengths is limited to 255.
+ if len(str) > 255 {
+ return false
+ }
+
+ // Each node has a label, which is zero to 63 octets in length
+ parts := strings.Split(str, ".")
+ valid := true
+ for _, p := range parts {
+ if len(p) > 63 {
+ valid = false
+ }
+ }
+ return valid
+}
+
+// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed
+func IsUUID(str string) bool {
+ _, err := uuid.Parse(str)
+ return err == nil
+}
+
+// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed
+func IsUUID3(str string) bool {
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(3)
+}
+
+// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed
+func IsUUID4(str string) bool {
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(4)
+}
+
+// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed
+func IsUUID5(str string) bool {
+ id, err := uuid.Parse(str)
+ return err == nil && id.Version() == uuid.Version(5)
+}
+
+// IsEmail validates an email address.
+func IsEmail(str string) bool {
+ addr, e := mail.ParseAddress(str)
+ return e == nil && addr.Address != ""
+}
+
+func init() {
+ // register formats in the default registry:
+ // - byte
+ // - creditcard
+ // - email
+ // - hexcolor
+ // - hostname
+ // - ipv4
+ // - ipv6
+ // - cidr
+ // - isbn
+ // - isbn10
+ // - isbn13
+ // - mac
+ // - password
+ // - rgbcolor
+ // - ssn
+ // - uri
+ // - uuid
+ // - uuid3
+ // - uuid4
+ // - uuid5
+ u := URI("")
+ Default.Add("uri", &u, govalidator.IsRequestURI)
+
+ eml := Email("")
+ Default.Add("email", &eml, IsEmail)
+
+ hn := Hostname("")
+ Default.Add("hostname", &hn, IsHostname)
+
+ ip4 := IPv4("")
+ Default.Add("ipv4", &ip4, govalidator.IsIPv4)
+
+ ip6 := IPv6("")
+ Default.Add("ipv6", &ip6, govalidator.IsIPv6)
+
+ cidr := CIDR("")
+ Default.Add("cidr", &cidr, govalidator.IsCIDR)
+
+ mac := MAC("")
+ Default.Add("mac", &mac, govalidator.IsMAC)
+
+ uid := UUID("")
+ Default.Add("uuid", &uid, IsUUID)
+
+ uid3 := UUID3("")
+ Default.Add("uuid3", &uid3, IsUUID3)
+
+ uid4 := UUID4("")
+ Default.Add("uuid4", &uid4, IsUUID4)
+
+ uid5 := UUID5("")
+ Default.Add("uuid5", &uid5, IsUUID5)
+
+ isbn := ISBN("")
+ Default.Add("isbn", &isbn, func(str string) bool { return govalidator.IsISBN10(str) || govalidator.IsISBN13(str) })
+
+ isbn10 := ISBN10("")
+ Default.Add("isbn10", &isbn10, govalidator.IsISBN10)
+
+ isbn13 := ISBN13("")
+ Default.Add("isbn13", &isbn13, govalidator.IsISBN13)
+
+ cc := CreditCard("")
+ Default.Add("creditcard", &cc, govalidator.IsCreditCard)
+
+ ssn := SSN("")
+ Default.Add("ssn", &ssn, govalidator.IsSSN)
+
+ hc := HexColor("")
+ Default.Add("hexcolor", &hc, govalidator.IsHexcolor)
+
+ rc := RGBColor("")
+ Default.Add("rgbcolor", &rc, govalidator.IsRGBcolor)
+
+ b64 := Base64([]byte(nil))
+ Default.Add("byte", &b64, govalidator.IsBase64)
+
+ pw := Password("")
+ Default.Add("password", &pw, func(_ string) bool { return true })
+}
+
+// Base64 represents a base64 encoded string, using URLEncoding alphabet
+//
+// swagger:strfmt byte
+type Base64 []byte
+
+// MarshalText turns this instance into text
+func (b Base64) MarshalText() ([]byte, error) {
+ enc := base64.URLEncoding
+ src := []byte(b)
+ buf := make([]byte, enc.EncodedLen(len(src)))
+ enc.Encode(buf, src)
+ return buf, nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (b *Base64) UnmarshalText(data []byte) error { // validation is performed later on
+ enc := base64.URLEncoding
+ dbuf := make([]byte, enc.DecodedLen(len(data)))
+
+ n, err := enc.Decode(dbuf, data)
+ if err != nil {
+ return err
+ }
+
+ *b = dbuf[:n]
+ return nil
+}
+
+// Scan read a value from a database driver
+func (b *Base64) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v)))
+ n, err := base64.StdEncoding.Decode(dbuf, v)
+ if err != nil {
+ return err
+ }
+ *b = dbuf[:n]
+ case string:
+ vv, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err
+ }
+ *b = Base64(vv)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (b Base64) Value() (driver.Value, error) {
+ return driver.Value(b.String()), nil
+}
+
+func (b Base64) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(b))
+}
+
+// MarshalJSON returns the Base64 as JSON
+func (b Base64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.String())
+}
+
+// UnmarshalJSON sets the Base64 from JSON
+func (b *Base64) UnmarshalJSON(data []byte) error {
+ var b64str string
+ if err := json.Unmarshal(data, &b64str); err != nil {
+ return err
+ }
+ vb, err := base64.StdEncoding.DecodeString(b64str)
+ if err != nil {
+ return err
+ }
+ *b = Base64(vb)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (b Base64) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": b.String()})
+}
+
+// UnmarshalBSON document into this value
+func (b *Base64) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if bd, ok := m["data"].(string); ok {
+ vb, err := base64.StdEncoding.DecodeString(bd)
+ if err != nil {
+ return err
+ }
+ *b = Base64(vb)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as base64")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (b *Base64) DeepCopyInto(out *Base64) {
+ *out = *b
+}
+
+// DeepCopy copies the receiver into a new Base64.
+func (b *Base64) DeepCopy() *Base64 {
+ if b == nil {
+ return nil
+ }
+ out := new(Base64)
+ b.DeepCopyInto(out)
+ return out
+}
+
+// URI represents the uri string format as specified by the json schema spec
+//
+// swagger:strfmt uri
+type URI string
+
+// MarshalText turns this instance into text
+func (u URI) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *URI) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = URI(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *URI) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = URI(string(v))
+ case string:
+ *u = URI(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u URI) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u URI) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the URI as JSON
+func (u URI) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the URI from JSON
+func (u *URI) UnmarshalJSON(data []byte) error {
+ var uristr string
+ if err := json.Unmarshal(data, &uristr); err != nil {
+ return err
+ }
+ *u = URI(uristr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u URI) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *URI) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = URI(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as uri")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *URI) DeepCopyInto(out *URI) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new URI.
+func (u *URI) DeepCopy() *URI {
+ if u == nil {
+ return nil
+ }
+ out := new(URI)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// Email represents the email string format as specified by the json schema spec
+//
+// swagger:strfmt email
+type Email string
+
+// MarshalText turns this instance into text
+func (e Email) MarshalText() ([]byte, error) {
+ return []byte(string(e)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (e *Email) UnmarshalText(data []byte) error { // validation is performed later on
+ *e = Email(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (e *Email) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *e = Email(string(v))
+ case string:
+ *e = Email(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (e Email) Value() (driver.Value, error) {
+ return driver.Value(string(e)), nil
+}
+
+func (e Email) String() string {
+ return string(e)
+}
+
+// MarshalJSON returns the Email as JSON
+func (e Email) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(e))
+}
+
+// UnmarshalJSON sets the Email from JSON
+func (e *Email) UnmarshalJSON(data []byte) error {
+ var estr string
+ if err := json.Unmarshal(data, &estr); err != nil {
+ return err
+ }
+ *e = Email(estr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (e Email) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": e.String()})
+}
+
+// UnmarshalBSON document into this value
+func (e *Email) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *e = Email(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as email")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (e *Email) DeepCopyInto(out *Email) {
+ *out = *e
+}
+
+// DeepCopy copies the receiver into a new Email.
+func (e *Email) DeepCopy() *Email {
+ if e == nil {
+ return nil
+ }
+ out := new(Email)
+ e.DeepCopyInto(out)
+ return out
+}
+
+// Hostname represents the hostname string format as specified by the json schema spec
+//
+// swagger:strfmt hostname
+type Hostname string
+
+// MarshalText turns this instance into text
+func (h Hostname) MarshalText() ([]byte, error) {
+ return []byte(string(h)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed later on
+ *h = Hostname(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (h *Hostname) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *h = Hostname(string(v))
+ case string:
+ *h = Hostname(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (h Hostname) Value() (driver.Value, error) {
+ return driver.Value(string(h)), nil
+}
+
+func (h Hostname) String() string {
+ return string(h)
+}
+
+// MarshalJSON returns the Hostname as JSON
+func (h Hostname) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(h))
+}
+
+// UnmarshalJSON sets the Hostname from JSON
+func (h *Hostname) UnmarshalJSON(data []byte) error {
+ var hstr string
+ if err := json.Unmarshal(data, &hstr); err != nil {
+ return err
+ }
+ *h = Hostname(hstr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (h Hostname) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": h.String()})
+}
+
+// UnmarshalBSON document into this value
+func (h *Hostname) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *h = Hostname(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as hostname")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (h *Hostname) DeepCopyInto(out *Hostname) {
+ *out = *h
+}
+
+// DeepCopy copies the receiver into a new Hostname.
+func (h *Hostname) DeepCopy() *Hostname {
+ if h == nil {
+ return nil
+ }
+ out := new(Hostname)
+ h.DeepCopyInto(out)
+ return out
+}
+
+// IPv4 represents an IP v4 address
+//
+// swagger:strfmt ipv4
+type IPv4 string
+
+// MarshalText turns this instance into text
+func (u IPv4) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = IPv4(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *IPv4) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = IPv4(string(v))
+ case string:
+ *u = IPv4(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u IPv4) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u IPv4) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the IPv4 as JSON
+func (u IPv4) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the IPv4 from JSON
+func (u *IPv4) UnmarshalJSON(data []byte) error {
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = IPv4(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u IPv4) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *IPv4) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = IPv4(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ipv4")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *IPv4) DeepCopyInto(out *IPv4) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new IPv4.
+func (u *IPv4) DeepCopy() *IPv4 {
+ if u == nil {
+ return nil
+ }
+ out := new(IPv4)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// IPv6 represents an IP v6 address
+//
+// swagger:strfmt ipv6
+type IPv6 string
+
+// MarshalText turns this instance into text
+func (u IPv6) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = IPv6(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *IPv6) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = IPv6(string(v))
+ case string:
+ *u = IPv6(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u IPv6) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u IPv6) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the IPv6 as JSON
+func (u IPv6) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the IPv6 from JSON
+func (u *IPv6) UnmarshalJSON(data []byte) error {
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = IPv6(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u IPv6) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *IPv6) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = IPv6(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ipv6")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *IPv6) DeepCopyInto(out *IPv6) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new IPv6.
+func (u *IPv6) DeepCopy() *IPv6 {
+ if u == nil {
+ return nil
+ }
+ out := new(IPv6)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// CIDR represents a Classless Inter-Domain Routing notation
+//
+// swagger:strfmt cidr
+type CIDR string
+
+// MarshalText turns this instance into text
+func (u CIDR) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = CIDR(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *CIDR) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = CIDR(string(v))
+ case string:
+ *u = CIDR(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u CIDR) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u CIDR) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the CIDR as JSON
+func (u CIDR) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the CIDR from JSON
+func (u *CIDR) UnmarshalJSON(data []byte) error {
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = CIDR(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u CIDR) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *CIDR) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = CIDR(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as CIDR")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *CIDR) DeepCopyInto(out *CIDR) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new CIDR.
+func (u *CIDR) DeepCopy() *CIDR {
+ if u == nil {
+ return nil
+ }
+ out := new(CIDR)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// MAC represents a 48 bit MAC address
+//
+// swagger:strfmt mac
+type MAC string
+
+// MarshalText turns this instance into text
+func (u MAC) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *MAC) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = MAC(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *MAC) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		*u = MAC(string(v))
+	case string:
+		*u = MAC(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.MAC from: %#v", v)
+	}
+
+	return nil
+}
+
+// Value converts a value to a database driver value
+func (u MAC) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u MAC) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the MAC as JSON
+func (u MAC) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the MAC from JSON
+func (u *MAC) UnmarshalJSON(data []byte) error {
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = MAC(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u MAC) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *MAC) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = MAC(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as MAC")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *MAC) DeepCopyInto(out *MAC) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new MAC.
+func (u *MAC) DeepCopy() *MAC {
+ if u == nil {
+ return nil
+ }
+ out := new(MAC)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// UUID represents a uuid string format
+//
+// swagger:strfmt uuid
+type UUID string
+
+// MarshalText turns this instance into text
+func (u UUID) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *UUID) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = UUID(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *UUID) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = UUID(string(v))
+ case string:
+ *u = UUID(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u UUID) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u UUID) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the UUID as JSON
+func (u UUID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the UUID from JSON
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = UUID(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u UUID) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *UUID) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = UUID(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as UUID")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *UUID) DeepCopyInto(out *UUID) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new UUID.
+func (u *UUID) DeepCopy() *UUID {
+ if u == nil {
+ return nil
+ }
+ out := new(UUID)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// UUID3 represents a uuid3 string format
+//
+// swagger:strfmt uuid3
+type UUID3 string
+
+// MarshalText turns this instance into text
+func (u UUID3) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = UUID3(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *UUID3) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = UUID3(string(v))
+ case string:
+ *u = UUID3(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u UUID3) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u UUID3) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the UUID3 as JSON
+func (u UUID3) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the UUID3 from JSON
+func (u *UUID3) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull { // a JSON null leaves the receiver unchanged
+		return nil
+	}
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	*u = UUID3(ustr)
+	return nil
+}
+
+// MarshalBSON document from this value
+func (u UUID3) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *UUID3) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = UUID3(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as UUID3")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *UUID3) DeepCopyInto(out *UUID3) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new UUID3.
+func (u *UUID3) DeepCopy() *UUID3 {
+ if u == nil {
+ return nil
+ }
+ out := new(UUID3)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// UUID4 represents a uuid4 string format
+//
+// swagger:strfmt uuid4
+type UUID4 string
+
+// MarshalText turns this instance into text
+func (u UUID4) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = UUID4(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *UUID4) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = UUID4(string(v))
+ case string:
+ *u = UUID4(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u UUID4) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u UUID4) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the UUID4 as JSON
+func (u UUID4) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the UUID4 from JSON
+func (u *UUID4) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull { // a JSON null leaves the receiver unchanged
+		return nil
+	}
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	*u = UUID4(ustr)
+	return nil
+}
+
+// MarshalBSON document from this value
+func (u UUID4) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *UUID4) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = UUID4(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as UUID4")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *UUID4) DeepCopyInto(out *UUID4) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new UUID4.
+func (u *UUID4) DeepCopy() *UUID4 {
+ if u == nil {
+ return nil
+ }
+ out := new(UUID4)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// UUID5 represents a uuid5 string format
+//
+// swagger:strfmt uuid5
+type UUID5 string
+
+// MarshalText turns this instance into text
+func (u UUID5) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = UUID5(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *UUID5) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = UUID5(string(v))
+ case string:
+ *u = UUID5(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u UUID5) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u UUID5) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the UUID5 as JSON
+func (u UUID5) MarshalJSON() ([]byte, error) {
+	return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the UUID5 from JSON
+func (u *UUID5) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull { // a JSON null leaves the receiver unchanged
+		return nil
+	}
+	var ustr string
+	if err := json.Unmarshal(data, &ustr); err != nil {
+		return err
+	}
+	*u = UUID5(ustr)
+	return nil
+}
+
+// MarshalBSON document from this value
+func (u UUID5) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *UUID5) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = UUID5(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as UUID5")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *UUID5) DeepCopyInto(out *UUID5) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new UUID5.
+func (u *UUID5) DeepCopy() *UUID5 {
+ if u == nil {
+ return nil
+ }
+ out := new(UUID5)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// ISBN represents an isbn string format
+//
+// swagger:strfmt isbn
+type ISBN string
+
+// MarshalText turns this instance into text
+func (u ISBN) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = ISBN(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *ISBN) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = ISBN(string(v))
+ case string:
+ *u = ISBN(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u ISBN) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u ISBN) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the ISBN as JSON
+func (u ISBN) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the ISBN from JSON
+func (u *ISBN) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = ISBN(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u ISBN) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *ISBN) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = ISBN(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ISBN")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *ISBN) DeepCopyInto(out *ISBN) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new ISBN.
+func (u *ISBN) DeepCopy() *ISBN {
+ if u == nil {
+ return nil
+ }
+ out := new(ISBN)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// ISBN10 represents an isbn 10 string format
+//
+// swagger:strfmt isbn10
+type ISBN10 string
+
+// MarshalText turns this instance into text
+func (u ISBN10) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = ISBN10(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *ISBN10) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = ISBN10(string(v))
+ case string:
+ *u = ISBN10(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u ISBN10) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u ISBN10) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the ISBN10 as JSON
+func (u ISBN10) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the ISBN10 from JSON
+func (u *ISBN10) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = ISBN10(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u ISBN10) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *ISBN10) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = ISBN10(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ISBN10")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *ISBN10) DeepCopyInto(out *ISBN10) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new ISBN10.
+func (u *ISBN10) DeepCopy() *ISBN10 {
+ if u == nil {
+ return nil
+ }
+ out := new(ISBN10)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// ISBN13 represents an isbn 13 string format
+//
+// swagger:strfmt isbn13
+type ISBN13 string
+
+// MarshalText turns this instance into text
+func (u ISBN13) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = ISBN13(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *ISBN13) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = ISBN13(string(v))
+ case string:
+ *u = ISBN13(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u ISBN13) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u ISBN13) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the ISBN13 as JSON
+func (u ISBN13) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the ISBN13 from JSON
+func (u *ISBN13) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = ISBN13(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u ISBN13) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *ISBN13) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = ISBN13(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ISBN13")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *ISBN13) DeepCopyInto(out *ISBN13) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new ISBN13.
+func (u *ISBN13) DeepCopy() *ISBN13 {
+ if u == nil {
+ return nil
+ }
+ out := new(ISBN13)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// CreditCard represents a credit card string format
+//
+// swagger:strfmt creditcard
+type CreditCard string
+
+// MarshalText turns this instance into text
+func (u CreditCard) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *CreditCard) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = CreditCard(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *CreditCard) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = CreditCard(string(v))
+ case string:
+ *u = CreditCard(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u CreditCard) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u CreditCard) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the CreditCard as JSON
+func (u CreditCard) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the CreditCard from JSON
+func (u *CreditCard) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = CreditCard(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u CreditCard) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *CreditCard) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = CreditCard(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as CreditCard")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *CreditCard) DeepCopyInto(out *CreditCard) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new CreditCard.
+func (u *CreditCard) DeepCopy() *CreditCard {
+ if u == nil {
+ return nil
+ }
+ out := new(CreditCard)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// SSN represents a social security string format
+//
+// swagger:strfmt ssn
+type SSN string
+
+// MarshalText turns this instance into text
+func (u SSN) MarshalText() ([]byte, error) {
+ return []byte(string(u)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *SSN) UnmarshalText(data []byte) error { // validation is performed later on
+ *u = SSN(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (u *SSN) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *u = SSN(string(v))
+ case string:
+ *u = SSN(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (u SSN) Value() (driver.Value, error) {
+ return driver.Value(string(u)), nil
+}
+
+func (u SSN) String() string {
+ return string(u)
+}
+
+// MarshalJSON returns the SSN as JSON
+func (u SSN) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(u))
+}
+
+// UnmarshalJSON sets the SSN from JSON
+func (u *SSN) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *u = SSN(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u SSN) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *SSN) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *u = SSN(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as SSN")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *SSN) DeepCopyInto(out *SSN) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new SSN.
+func (u *SSN) DeepCopy() *SSN {
+ if u == nil {
+ return nil
+ }
+ out := new(SSN)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// HexColor represents a hex color string format
+//
+// swagger:strfmt hexcolor
+type HexColor string
+
+// MarshalText turns this instance into text
+func (h HexColor) MarshalText() ([]byte, error) {
+ return []byte(string(h)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed later on
+ *h = HexColor(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (h *HexColor) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *h = HexColor(string(v))
+ case string:
+ *h = HexColor(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (h HexColor) Value() (driver.Value, error) {
+ return driver.Value(string(h)), nil
+}
+
+func (h HexColor) String() string {
+ return string(h)
+}
+
+// MarshalJSON returns the HexColor as JSON
+func (h HexColor) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(h))
+}
+
+// UnmarshalJSON sets the HexColor from JSON
+func (h *HexColor) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *h = HexColor(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (h HexColor) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": h.String()})
+}
+
+// UnmarshalBSON document into this value
+func (h *HexColor) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *h = HexColor(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as HexColor")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (h *HexColor) DeepCopyInto(out *HexColor) {
+ *out = *h
+}
+
+// DeepCopy copies the receiver into a new HexColor.
+func (h *HexColor) DeepCopy() *HexColor {
+ if h == nil {
+ return nil
+ }
+ out := new(HexColor)
+ h.DeepCopyInto(out)
+ return out
+}
+
+// RGBColor represents a RGB color string format
+//
+// swagger:strfmt rgbcolor
+type RGBColor string
+
+// MarshalText turns this instance into text
+func (r RGBColor) MarshalText() ([]byte, error) {
+ return []byte(string(r)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed later on
+ *r = RGBColor(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (r *RGBColor) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *r = RGBColor(string(v))
+ case string:
+ *r = RGBColor(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (r RGBColor) Value() (driver.Value, error) {
+ return driver.Value(string(r)), nil
+}
+
+func (r RGBColor) String() string {
+ return string(r)
+}
+
+// MarshalJSON returns the RGBColor as JSON
+func (r RGBColor) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(r))
+}
+
+// UnmarshalJSON sets the RGBColor from JSON
+func (r *RGBColor) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *r = RGBColor(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (r RGBColor) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": r.String()})
+}
+
+// UnmarshalBSON document into this value
+func (r *RGBColor) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *r = RGBColor(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as RGBColor")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (r *RGBColor) DeepCopyInto(out *RGBColor) {
+ *out = *r
+}
+
+// DeepCopy copies the receiver into a new RGBColor.
+func (r *RGBColor) DeepCopy() *RGBColor {
+ if r == nil {
+ return nil
+ }
+ out := new(RGBColor)
+ r.DeepCopyInto(out)
+ return out
+}
+
+// Password represents a password.
+// This has no validations and is mainly used as a marker for UI components.
+//
+// swagger:strfmt password
+type Password string
+
+// MarshalText turns this instance into text
+func (r Password) MarshalText() ([]byte, error) {
+ return []byte(string(r)), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (r *Password) UnmarshalText(data []byte) error { // validation is performed later on
+ *r = Password(string(data))
+ return nil
+}
+
+// Scan read a value from a database driver
+func (r *Password) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ case []byte:
+ *r = Password(string(v))
+ case string:
+ *r = Password(v)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts a value to a database driver value
+func (r Password) Value() (driver.Value, error) {
+ return driver.Value(string(r)), nil
+}
+
+func (r Password) String() string {
+ return string(r)
+}
+
+// MarshalJSON returns the Password as JSON
+func (r Password) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(r))
+}
+
+// UnmarshalJSON sets the Password from JSON
+func (r *Password) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ *r = Password(ustr)
+ return nil
+}
+
+// MarshalBSON document from this value
+func (r Password) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": r.String()})
+}
+
+// UnmarshalBSON document into this value
+func (r *Password) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ *r = Password(ud)
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as Password")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (r *Password) DeepCopyInto(out *Password) {
+ *out = *r
+}
+
+// DeepCopy copies the receiver into a new Password.
+func (r *Password) DeepCopy() *Password {
+ if r == nil {
+ return nil
+ }
+ out := new(Password)
+ r.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/doc.go b/vendor/github.com/go-openapi/strfmt/doc.go
new file mode 100644
index 00000000..41aebe6d
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package strfmt contains custom string formats
+//
+// TODO: add info on how to define and register a custom format
+package strfmt
diff --git a/vendor/github.com/go-openapi/strfmt/duration.go b/vendor/github.com/go-openapi/strfmt/duration.go
new file mode 100644
index 00000000..6284b821
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/duration.go
@@ -0,0 +1,211 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+ d := Duration(0)
+ // register this format in the default registry
+ Default.Add("duration", &d, IsDuration)
+}
+
+var (
+ timeUnits = [][]string{
+ {"ns", "nano"},
+ {"us", "µs", "micro"},
+ {"ms", "milli"},
+ {"s", "sec"},
+ {"m", "min"},
+ {"h", "hr", "hour"},
+ {"d", "day"},
+ {"w", "wk", "week"},
+ }
+
+ timeMultiplier = map[string]time.Duration{
+ "ns": time.Nanosecond,
+ "us": time.Microsecond,
+ "ms": time.Millisecond,
+ "s": time.Second,
+ "m": time.Minute,
+ "h": time.Hour,
+ "d": 24 * time.Hour,
+ "w": 7 * 24 * time.Hour,
+ }
+
+ durationMatcher = regexp.MustCompile(`((\d+)\s*([A-Za-zµ]+))`)
+)
+
+// IsDuration returns true if the provided string is a valid duration
+func IsDuration(str string) bool {
+ _, err := ParseDuration(str)
+ return err == nil
+}
+
+// Duration represents a duration
+//
+// Duration stores a period of time as a nanosecond count, with the largest
+// representable duration being approximately 290 years.
+//
+// swagger:strfmt duration
+type Duration time.Duration
+
+// MarshalText turns this instance into text
+func (d Duration) MarshalText() ([]byte, error) {
+ return []byte(time.Duration(d).String()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on
+ dd, err := ParseDuration(string(data))
+ if err != nil {
+ return err
+ }
+ *d = Duration(dd)
+ return nil
+}
+
+// ParseDuration parses a duration from a string, compatible with scala duration syntax
+func ParseDuration(cand string) (time.Duration, error) {
+ if dur, err := time.ParseDuration(cand); err == nil {
+ return dur, nil
+ }
+
+ var dur time.Duration
+ ok := false
+ for _, match := range durationMatcher.FindAllStringSubmatch(cand, -1) {
+
+ factor, err := strconv.Atoi(match[2]) // converts string to int
+ if err != nil {
+ return 0, err
+ }
+ unit := strings.ToLower(strings.TrimSpace(match[3]))
+
+ for _, variants := range timeUnits {
+ last := len(variants) - 1
+ multiplier := timeMultiplier[variants[0]]
+
+ for i, variant := range variants {
+ if (last == i && strings.HasPrefix(unit, variant)) || strings.EqualFold(variant, unit) {
+ ok = true
+ dur += (time.Duration(factor) * multiplier)
+ }
+ }
+ }
+ }
+
+ if ok {
+ return dur, nil
+ }
+ return 0, fmt.Errorf("unable to parse %s as duration", cand)
+}
+
+// Scan reads a Duration value from database driver type.
+func (d *Duration) Scan(raw interface{}) error {
+ switch v := raw.(type) {
+ // TODO: case []byte: // ?
+ case int64:
+ *d = Duration(v)
+ case float64:
+ *d = Duration(int64(v))
+ case nil:
+ *d = Duration(0)
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts Duration to a primitive value ready to be written to a database.
+func (d Duration) Value() (driver.Value, error) {
+ return driver.Value(int64(d)), nil
+}
+
+// String converts this duration to a string
+func (d Duration) String() string {
+ return time.Duration(d).String()
+}
+
+// MarshalJSON returns the Duration as JSON
+func (d Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(time.Duration(d).String())
+}
+
+// UnmarshalJSON sets the Duration from JSON
+func (d *Duration) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+
+ var dstr string
+ if err := json.Unmarshal(data, &dstr); err != nil {
+ return err
+ }
+ tt, err := ParseDuration(dstr)
+ if err != nil {
+ return err
+ }
+ *d = Duration(tt)
+ return nil
+}
+
+func (d Duration) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": d.String()})
+}
+
+func (d *Duration) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if data, ok := m["data"].(string); ok {
+ rd, err := ParseDuration(data)
+ if err != nil {
+ return err
+ }
+ *d = Duration(rd)
+ return nil
+ }
+
+ return errors.New("couldn't unmarshal bson bytes value as Date")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Duration) DeepCopyInto(out *Duration) {
+ *out = *d
+}
+
+// DeepCopy copies the receiver into a new Duration.
+func (d *Duration) DeepCopy() *Duration {
+ if d == nil {
+ return nil
+ }
+ out := new(Duration)
+ d.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go
new file mode 100644
index 00000000..888e107c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/format.go
@@ -0,0 +1,327 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "encoding"
+ stderrors "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Default is the default formats registry
+var Default = NewSeededFormats(nil, nil)
+
+// Validator represents a validator for a string format.
+type Validator func(string) bool
+
+// Format represents a string format.
+//
+// All implementations of Format provide a string representation and text
+// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/json).
+type Format interface {
+ String() string
+ encoding.TextMarshaler
+ encoding.TextUnmarshaler
+}
+
+// Registry is a registry of string formats, with a validation method.
+type Registry interface {
+ Add(string, Format, Validator) bool
+ DelByName(string) bool
+ GetType(string) (reflect.Type, bool)
+ ContainsName(string) bool
+ Validates(string, string) bool
+ Parse(string, string) (interface{}, error)
+ MapStructureHookFunc() mapstructure.DecodeHookFunc
+}
+
+type knownFormat struct {
+ Name string
+ OrigName string
+ Type reflect.Type
+ Validator Validator
+}
+
+// NameNormalizer is a function that normalizes a format name.
+type NameNormalizer func(string) string
+
+// DefaultNameNormalizer removes all dashes
+func DefaultNameNormalizer(name string) string {
+ return strings.ReplaceAll(name, "-", "")
+}
+
+type defaultFormats struct {
+ sync.Mutex
+ data []knownFormat
+ normalizeName NameNormalizer
+}
+
+// NewFormats creates a new formats registry seeded with the values from the default
+func NewFormats() Registry {
+ //nolint:forcetypeassert
+ return NewSeededFormats(Default.(*defaultFormats).data, nil)
+}
+
+// NewSeededFormats creates a new formats registry
+func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
+ if normalizer == nil {
+ normalizer = DefaultNameNormalizer
+ }
+ // copy here, don't modify original
+ d := append([]knownFormat(nil), seeds...)
+ return &defaultFormats{
+ data: d,
+ normalizeName: normalizer,
+ }
+}
+
+// MapStructureHookFunc is a decode hook function for mapstructure
+func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
+ return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
+ if from.Kind() != reflect.String {
+ return obj, nil
+ }
+ data, ok := obj.(string)
+ if !ok {
+ return nil, fmt.Errorf("failed to cast %+v to string", obj)
+ }
+
+ for _, v := range f.data {
+ tpe, _ := f.GetType(v.Name)
+ if to == tpe {
+ switch v.Name {
+ case "date":
+ d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
+ if err != nil {
+ return nil, err
+ }
+ return Date(d), nil
+ case "datetime":
+ input := data
+ if len(input) == 0 {
+ return nil, stderrors.New("empty string is an invalid datetime format")
+ }
+ return ParseDateTime(input)
+ case "duration":
+ dur, err := ParseDuration(data)
+ if err != nil {
+ return nil, err
+ }
+ return Duration(dur), nil
+ case "uri":
+ return URI(data), nil
+ case "email":
+ return Email(data), nil
+ case "uuid":
+ return UUID(data), nil
+ case "uuid3":
+ return UUID3(data), nil
+ case "uuid4":
+ return UUID4(data), nil
+ case "uuid5":
+ return UUID5(data), nil
+ case "hostname":
+ return Hostname(data), nil
+ case "ipv4":
+ return IPv4(data), nil
+ case "ipv6":
+ return IPv6(data), nil
+ case "cidr":
+ return CIDR(data), nil
+ case "mac":
+ return MAC(data), nil
+ case "isbn":
+ return ISBN(data), nil
+ case "isbn10":
+ return ISBN10(data), nil
+ case "isbn13":
+ return ISBN13(data), nil
+ case "creditcard":
+ return CreditCard(data), nil
+ case "ssn":
+ return SSN(data), nil
+ case "hexcolor":
+ return HexColor(data), nil
+ case "rgbcolor":
+ return RGBColor(data), nil
+ case "byte":
+ return Base64(data), nil
+ case "password":
+ return Password(data), nil
+ case "ulid":
+ ulid, err := ParseULID(data)
+ if err != nil {
+ return nil, err
+ }
+ return ulid, nil
+ default:
+ return nil, errors.InvalidTypeName(v.Name)
+ }
+ }
+ }
+ return data, nil
+ }
+}
+
+// Add adds a new format, return true if this was a new item instead of a replacement
+func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool {
+ f.Lock()
+ defer f.Unlock()
+
+ nme := f.normalizeName(name)
+
+ tpe := reflect.TypeOf(strfmt)
+ if tpe.Kind() == reflect.Ptr {
+ tpe = tpe.Elem()
+ }
+
+ for i := range f.data {
+ v := &f.data[i]
+ if v.Name == nme {
+ v.Type = tpe
+ v.Validator = validator
+ return false
+ }
+ }
+
+ // turns out it's new after all
+ f.data = append(f.data, knownFormat{Name: nme, OrigName: name, Type: tpe, Validator: validator})
+ return true
+}
+
+// GetType gets the type for the specified name
+func (f *defaultFormats) GetType(name string) (reflect.Type, bool) {
+ f.Lock()
+ defer f.Unlock()
+ nme := f.normalizeName(name)
+ for _, v := range f.data {
+ if v.Name == nme {
+ return v.Type, true
+ }
+ }
+ return nil, false
+}
+
+// DelByName removes the format by the specified name, returns true when an item was actually removed
+func (f *defaultFormats) DelByName(name string) bool {
+ f.Lock()
+ defer f.Unlock()
+
+ nme := f.normalizeName(name)
+
+ for i, v := range f.data {
+ if v.Name == nme {
+ f.data[i] = knownFormat{} // release
+ f.data = append(f.data[:i], f.data[i+1:]...)
+ return true
+ }
+ }
+ return false
+}
+
+// DelByFormat removes the specified format, returns true when an item was actually removed
+func (f *defaultFormats) DelByFormat(strfmt Format) bool {
+ f.Lock()
+ defer f.Unlock()
+
+ tpe := reflect.TypeOf(strfmt)
+ if tpe.Kind() == reflect.Ptr {
+ tpe = tpe.Elem()
+ }
+
+ for i, v := range f.data {
+ if v.Type == tpe {
+ f.data[i] = knownFormat{} // release
+ f.data = append(f.data[:i], f.data[i+1:]...)
+ return true
+ }
+ }
+ return false
+}
+
+// ContainsName returns true if this registry contains the specified name
+func (f *defaultFormats) ContainsName(name string) bool {
+ f.Lock()
+ defer f.Unlock()
+ nme := f.normalizeName(name)
+ for _, v := range f.data {
+ if v.Name == nme {
+ return true
+ }
+ }
+ return false
+}
+
+// ContainsFormat returns true if this registry contains the specified format
+func (f *defaultFormats) ContainsFormat(strfmt Format) bool {
+ f.Lock()
+ defer f.Unlock()
+ tpe := reflect.TypeOf(strfmt)
+ if tpe.Kind() == reflect.Ptr {
+ tpe = tpe.Elem()
+ }
+
+ for _, v := range f.data {
+ if v.Type == tpe {
+ return true
+ }
+ }
+ return false
+}
+
+// Validates passed data against format.
+//
+// Note that the format name is automatically normalized, e.g. one may
+// use "date-time" to use the "datetime" format validator.
+func (f *defaultFormats) Validates(name, data string) bool {
+ f.Lock()
+ defer f.Unlock()
+ nme := f.normalizeName(name)
+ for _, v := range f.data {
+ if v.Name == nme {
+ return v.Validator(data)
+ }
+ }
+ return false
+}
+
+// Parse a string into the appropriate format representation type.
+//
+// E.g. parsing a string a "date" will return a Date type.
+func (f *defaultFormats) Parse(name, data string) (interface{}, error) {
+ f.Lock()
+ defer f.Unlock()
+ nme := f.normalizeName(name)
+ for _, v := range f.data {
+ if v.Name == nme {
+ nw := reflect.New(v.Type).Interface()
+ if dec, ok := nw.(encoding.TextUnmarshaler); ok {
+ if err := dec.UnmarshalText([]byte(data)); err != nil {
+ return nil, err
+ }
+ return nw, nil
+ }
+ return nil, errors.InvalidTypeName(name)
+ }
+ }
+ return nil, errors.InvalidTypeName(name)
+}
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
new file mode 100644
index 00000000..f08ba4da
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -0,0 +1,321 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+
+ "go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var (
+ // UnixZero sets the zero unix timestamp we want to compare against.
+ // Unix 0 for an EST timezone is not equivalent to a UTC timezone.
+ UnixZero = time.Unix(0, 0).UTC()
+)
+
+func init() {
+ dt := DateTime{}
+ Default.Add("datetime", &dt, IsDateTime)
+}
+
+// IsDateTime returns true when the string is a valid date-time
+func IsDateTime(str string) bool {
+ if len(str) < 4 {
+ return false
+ }
+ s := strings.Split(strings.ToLower(str), "t")
+ if len(s) < 2 || !IsDate(s[0]) {
+ return false
+ }
+
+ matches := rxDateTime.FindAllStringSubmatch(s[1], -1)
+ if len(matches) == 0 || len(matches[0]) == 0 {
+ return false
+ }
+ m := matches[0]
+ res := m[1] <= "23" && m[2] <= "59" && m[3] <= "59"
+ return res
+}
+
+const (
+ // RFC3339Millis represents a ISO8601 format to millis instead of to nanos
+ RFC3339Millis = "2006-01-02T15:04:05.000Z07:00"
+ // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos
+ RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700"
+ // RFC3339Micro represents a ISO8601 format to micro instead of to nano
+ RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+ // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano
+ RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700"
+ // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone)
+ ISO8601LocalTime = "2006-01-02T15:04:05"
+ // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs)
+ ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z"
+ // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone)
+ ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
+ // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
+ ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
+ // short form of ISO8601TimeUniversalSortableDateTimePattern
+ ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
+ // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
+ DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
+)
+
+var (
+ rxDateTime = regexp.MustCompile(DateTimePattern)
+
+ // DateTimeFormats is the collection of formats used by ParseDateTime()
+ DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
+
+ // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
+ MarshalFormat = RFC3339Millis
+
+	// NormalizeTimeForMarshal provides a normalization function on time before marshaling (e.g. time.UTC).
+ // By default, the time value is not changed.
+ NormalizeTimeForMarshal = func(t time.Time) time.Time { return t }
+
+ // DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (ex: ISO8601 Local variants).
+ DefaultTimeLocation = time.UTC
+)
+
+// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
+func ParseDateTime(data string) (DateTime, error) {
+ if data == "" {
+ return NewDateTime(), nil
+ }
+ var lastError error
+ for _, layout := range DateTimeFormats {
+ dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ return DateTime(dd), nil
+ }
+ return DateTime{}, lastError
+}
+
+// DateTime is a time but it serializes to ISO8601 format with millis
+// It knows how to read 3 different variations of a RFC3339 date time.
+// Most APIs we encounter want either millisecond or second precision times.
+// This just tries to make it worry-free.
+//
+// swagger:strfmt date-time
+type DateTime time.Time
+
+// NewDateTime is a representation of zero value for DateTime type
+func NewDateTime() DateTime {
+ return DateTime(time.Unix(0, 0).UTC())
+}
+
+// String converts this time to a string
+func (t DateTime) String() string {
+ return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)
+}
+
+// IsZero returns whether the date time is a zero value
+func (t *DateTime) IsZero() bool {
+ if t == nil {
+ return true
+ }
+ return time.Time(*t).IsZero()
+}
+
+// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
+func (t *DateTime) IsUnixZero() bool {
+ if t == nil {
+ return true
+ }
+ return time.Time(*t).Equal(UnixZero)
+}
+
+// MarshalText implements the text marshaller interface
+func (t DateTime) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the text unmarshaller interface
+func (t *DateTime) UnmarshalText(text []byte) error {
+ tt, err := ParseDateTime(string(text))
+ if err != nil {
+ return err
+ }
+ *t = tt
+ return nil
+}
+
+// Scan scans a DateTime value from database driver type.
+func (t *DateTime) Scan(raw interface{}) error {
+ // TODO: case int64: and case float64: ?
+ switch v := raw.(type) {
+ case []byte:
+ return t.UnmarshalText(v)
+ case string:
+ return t.UnmarshalText([]byte(v))
+ case time.Time:
+ *t = DateTime(v)
+ case nil:
+ *t = DateTime{}
+ default:
+ return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v", v)
+ }
+
+ return nil
+}
+
+// Value converts DateTime to a primitive value ready to be written to a database.
+func (t DateTime) Value() (driver.Value, error) {
+ return driver.Value(t.String()), nil
+}
+
+// MarshalJSON returns the DateTime as JSON
+func (t DateTime) MarshalJSON() ([]byte, error) {
+ return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat))
+}
+
+// UnmarshalJSON sets the DateTime from JSON
+func (t *DateTime) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+
+ var tstr string
+ if err := json.Unmarshal(data, &tstr); err != nil {
+ return err
+ }
+ tt, err := ParseDateTime(tstr)
+ if err != nil {
+ return err
+ }
+ *t = tt
+ return nil
+}
+
+// MarshalBSON renders the DateTime as a BSON document
+func (t DateTime) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": t})
+}
+
+// UnmarshalBSON reads the DateTime from a BSON document
+func (t *DateTime) UnmarshalBSON(data []byte) error {
+ var obj struct {
+ Data DateTime
+ }
+
+ if err := bson.Unmarshal(data, &obj); err != nil {
+ return err
+ }
+
+ *t = obj.Data
+
+ return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+// Marshals a DateTime as a bsontype.DateTime, an int64 representing
+// milliseconds since epoch.
+func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
+ // UnixNano cannot be used directly, the result of calling UnixNano on the zero
+	// Time is undefined. That's why we use time.Nanosecond() instead.
+
+ tNorm := NormalizeTimeForMarshal(time.Time(t))
+ i64 := tNorm.Unix()*1000 + int64(tNorm.Nanosecond())/1e6
+
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, uint64(i64))
+
+ return bson.TypeDateTime, buf, nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
+ if tpe == bson.TypeNull {
+ *t = DateTime{}
+ return nil
+ }
+
+ if len(data) != 8 {
+ return errors.New("bson date field length not exactly 8 bytes")
+ }
+
+ i64 := int64(binary.LittleEndian.Uint64(data))
+ // TODO: Use bsonprim.DateTime.Time() method
+ *t = DateTime(time.Unix(i64/1000, i64%1000*1000000))
+
+ return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (t *DateTime) DeepCopyInto(out *DateTime) {
+ *out = *t
+}
+
+// DeepCopy copies the receiver into a new DateTime.
+func (t *DateTime) DeepCopy() *DateTime {
+ if t == nil {
+ return nil
+ }
+ out := new(DateTime)
+ t.DeepCopyInto(out)
+ return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (t DateTime) GobEncode() ([]byte, error) {
+ return t.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (t *DateTime) GobDecode(data []byte) error {
+ return t.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (t DateTime) MarshalBinary() ([]byte, error) {
+ return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (t *DateTime) UnmarshalBinary(data []byte) error {
+ var original time.Time
+
+ err := original.UnmarshalBinary(data)
+ if err != nil {
+ return err
+ }
+
+ *t = DateTime(original)
+
+ return nil
+}
+
+// Equal checks if two DateTime instances are equal using time.Time's Equal method
+func (t DateTime) Equal(t2 DateTime) bool {
+ return time.Time(t).Equal(time.Time(t2))
+}
diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go
new file mode 100644
index 00000000..e71aff7c
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/ulid.go
@@ -0,0 +1,230 @@
+package strfmt
+
+import (
+ cryptorand "crypto/rand"
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/oklog/ulid"
+ "go.mongodb.org/mongo-driver/bson"
+)
+
+// ULID represents a ulid string format
+// ref:
+//
+// https://github.com/ulid/spec
+//
+// impl:
+//
+// https://github.com/oklog/ulid
+//
+// swagger:strfmt ulid
+type ULID struct {
+ ulid.ULID
+}
+
+var (
+ ulidEntropyPool = sync.Pool{
+ New: func() interface{} {
+ return cryptorand.Reader
+ },
+ }
+
+ ULIDScanDefaultFunc = func(raw interface{}) (ULID, error) {
+ u := NewULIDZero()
+ switch x := raw.(type) {
+ case nil:
+			// zero ulid
+ return u, nil
+ case string:
+ if x == "" {
+ // zero ulid
+ return u, nil
+ }
+ return u, u.UnmarshalText([]byte(x))
+ case []byte:
+ return u, u.UnmarshalText(x)
+ }
+
+ return u, fmt.Errorf("cannot sql.Scan() strfmt.ULID from: %#v: %w", raw, ulid.ErrScanValue)
+ }
+
+ // ULIDScanOverrideFunc allows you to override the Scan method of the ULID type
+ ULIDScanOverrideFunc = ULIDScanDefaultFunc
+
+ ULIDValueDefaultFunc = func(u ULID) (driver.Value, error) {
+ return driver.Value(u.String()), nil
+ }
+
+ // ULIDValueOverrideFunc allows you to override the Value method of the ULID type
+ ULIDValueOverrideFunc = ULIDValueDefaultFunc
+)
+
+func init() {
+ // register formats in the default registry:
+ // - ulid
+ ulid := ULID{}
+ Default.Add("ulid", &ulid, IsULID)
+}
+
+// IsULID checks if provided string is ULID format
+// Be noticed that this function considers overflowed ULID as non-ulid.
+// For more details see https://github.com/ulid/spec
+func IsULID(str string) bool {
+ _, err := ulid.ParseStrict(str)
+ return err == nil
+}
+
+// ParseULID parses a string that represents a valid ULID
+func ParseULID(str string) (ULID, error) {
+ var u ULID
+
+ return u, u.UnmarshalText([]byte(str))
+}
+
+// NewULIDZero returns a zero valued ULID type
+func NewULIDZero() ULID {
+ return ULID{}
+}
+
+// NewULID generates new unique ULID value and a error if any
+func NewULID() (ULID, error) {
+ var u ULID
+
+ obj := ulidEntropyPool.Get()
+ entropy, ok := obj.(io.Reader)
+ if !ok {
+ return u, fmt.Errorf("failed to cast %+v to io.Reader", obj)
+ }
+
+ id, err := ulid.New(ulid.Now(), entropy)
+ if err != nil {
+ return u, err
+ }
+ ulidEntropyPool.Put(entropy)
+
+ u.ULID = id
+ return u, nil
+}
+
+// GetULID returns underlying instance of ULID
+func (u *ULID) GetULID() interface{} {
+ return u.ULID
+}
+
+// MarshalText returns this instance into text
+func (u ULID) MarshalText() ([]byte, error) {
+ return u.ULID.MarshalText()
+}
+
+// UnmarshalText hydrates this instance from text
+func (u *ULID) UnmarshalText(data []byte) error { // validation is performed later on
+ return u.ULID.UnmarshalText(data)
+}
+
+// Scan reads a value from a database driver
+func (u *ULID) Scan(raw interface{}) error {
+ ul, err := ULIDScanOverrideFunc(raw)
+ if err == nil {
+ *u = ul
+ }
+ return err
+}
+
+// Value converts a value to a database driver value
+func (u ULID) Value() (driver.Value, error) {
+ return ULIDValueOverrideFunc(u)
+}
+
+func (u ULID) String() string {
+ return u.ULID.String()
+}
+
+// MarshalJSON returns the ULID as JSON
+func (u ULID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(u.String())
+}
+
+// UnmarshalJSON sets the ULID from JSON
+func (u *ULID) UnmarshalJSON(data []byte) error {
+ if string(data) == jsonNull {
+ return nil
+ }
+ var ustr string
+ if err := json.Unmarshal(data, &ustr); err != nil {
+ return err
+ }
+ id, err := ulid.ParseStrict(ustr)
+ if err != nil {
+ return fmt.Errorf("couldn't parse JSON value as ULID: %w", err)
+ }
+ u.ULID = id
+ return nil
+}
+
+// MarshalBSON document from this value
+func (u ULID) MarshalBSON() ([]byte, error) {
+ return bson.Marshal(bson.M{"data": u.String()})
+}
+
+// UnmarshalBSON document into this value
+func (u *ULID) UnmarshalBSON(data []byte) error {
+ var m bson.M
+ if err := bson.Unmarshal(data, &m); err != nil {
+ return err
+ }
+
+ if ud, ok := m["data"].(string); ok {
+ id, err := ulid.ParseStrict(ud)
+ if err != nil {
+ return fmt.Errorf("couldn't parse bson bytes as ULID: %w", err)
+ }
+ u.ULID = id
+ return nil
+ }
+ return errors.New("couldn't unmarshal bson bytes as ULID")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (u *ULID) DeepCopyInto(out *ULID) {
+ *out = *u
+}
+
+// DeepCopy copies the receiver into a new ULID.
+func (u *ULID) DeepCopy() *ULID {
+ if u == nil {
+ return nil
+ }
+ out := new(ULID)
+ u.DeepCopyInto(out)
+ return out
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (u ULID) GobEncode() ([]byte, error) {
+ return u.ULID.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface.
+func (u *ULID) GobDecode(data []byte) error {
+ return u.ULID.UnmarshalBinary(data)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u ULID) MarshalBinary() ([]byte, error) {
+ return u.ULID.MarshalBinary()
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (u *ULID) UnmarshalBinary(data []byte) error {
+ return u.ULID.UnmarshalBinary(data)
+}
+
+// Equal checks if two ULID instances are equal by their underlying type
+func (u ULID) Equal(other ULID) bool {
+ return u.ULID == other.ULID
+}
diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes
new file mode 100644
index 00000000..49ad5276
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.gitattributes
@@ -0,0 +1,2 @@
+# gofmt always uses LF, whereas Git uses CRLF on Windows.
+*.go text eol=lf
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
new file mode 100644
index 00000000..c4b1b64f
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -0,0 +1,5 @@
+secrets.yml
+vendor
+Godeps
+.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
new file mode 100644
index 00000000..d2fafb8a
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -0,0 +1,56 @@
+linters-settings:
+ gocyclo:
+ min-complexity: 45
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - recvcheck
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ #- deadcode
+ #- interfacer
+ #- scopelint
+ #- varcheck
+ #- structcheck
+ #- golint
+ #- nosnakecase
+ #- maligned
+ #- goerr113
+ #- ifshort
+ #- gomnd
+ #- exhaustivestruct
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 00000000..e7f28ed6
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+~ x10 performance improvement and ~ /100 memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
new file mode 100644
index 00000000..a7292229
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -0,0 +1,23 @@
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
+[](https://goreportcard.com/report/github.com/go-openapi/swag)
+
+Contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+* convert between value and pointers for builtin types
+* convert from string to builtin types (wraps strconv)
+* fast json concatenation
+* search in path
+* load from file or http
+* name mangling
+
+
+This repo has only few dependencies outside of the standard library:
+
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go
new file mode 100644
index 00000000..fc085aeb
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert.go
@@ -0,0 +1,208 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "math"
+ "strconv"
+ "strings"
+)
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+ maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
+ minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
+ epsilon float64 = 1e-9
+)
+
+// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive
+func IsFloat64AJSONInteger(f float64) bool {
+ if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
+ return false
+ }
+ fa := math.Abs(f)
+ g := float64(uint64(f))
+ ga := math.Abs(g)
+
+ diff := math.Abs(f - g)
+
+ // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases
+ switch {
+ case f == g: // best case
+ return true
+ case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case
+ return true
+ case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values
+ return diff < (epsilon * math.SmallestNonzeroFloat64)
+ }
+ // check the relative error
+ return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon
+}
+
+var evaluatesAsTrue map[string]struct{}
+
+func init() {
+ evaluatesAsTrue = map[string]struct{}{
+ "true": {},
+ "1": {},
+ "yes": {},
+ "ok": {},
+ "y": {},
+ "on": {},
+ "selected": {},
+ "checked": {},
+ "t": {},
+ "enabled": {},
+ }
+}
+
+// ConvertBool turn a string into a boolean
+func ConvertBool(str string) (bool, error) {
+ _, ok := evaluatesAsTrue[strings.ToLower(str)]
+ return ok, nil
+}
+
+// ConvertFloat32 turn a string into a float32
+func ConvertFloat32(str string) (float32, error) {
+ f, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// ConvertFloat64 turn a string into a float64
+func ConvertFloat64(str string) (float64, error) {
+ return strconv.ParseFloat(str, 64)
+}
+
+// ConvertInt8 turn a string into an int8
+func ConvertInt8(str string) (int8, error) {
+ i, err := strconv.ParseInt(str, 10, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(i), nil
+}
+
+// ConvertInt16 turn a string into an int16
+func ConvertInt16(str string) (int16, error) {
+ i, err := strconv.ParseInt(str, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(i), nil
+}
+
+// ConvertInt32 turn a string into an int32
+func ConvertInt32(str string) (int32, error) {
+ i, err := strconv.ParseInt(str, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// ConvertInt64 turn a string into an int64
+func ConvertInt64(str string) (int64, error) {
+ return strconv.ParseInt(str, 10, 64)
+}
+
+// ConvertUint8 turn a string into an uint8
+func ConvertUint8(str string) (uint8, error) {
+ i, err := strconv.ParseUint(str, 10, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(i), nil
+}
+
+// ConvertUint16 turn a string into an uint16
+func ConvertUint16(str string) (uint16, error) {
+ i, err := strconv.ParseUint(str, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(i), nil
+}
+
+// ConvertUint32 turn a string into an uint32
+func ConvertUint32(str string) (uint32, error) {
+ i, err := strconv.ParseUint(str, 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// ConvertUint64 turn a string into an uint64
+func ConvertUint64(str string) (uint64, error) {
+ return strconv.ParseUint(str, 10, 64)
+}
+
+// FormatBool turns a boolean into a string
+func FormatBool(value bool) string {
+ return strconv.FormatBool(value)
+}
+
+// FormatFloat32 turns a float32 into a string
+func FormatFloat32(value float32) string {
+ return strconv.FormatFloat(float64(value), 'f', -1, 32)
+}
+
+// FormatFloat64 turns a float64 into a string
+func FormatFloat64(value float64) string {
+ return strconv.FormatFloat(value, 'f', -1, 64)
+}
+
+// FormatInt8 turns an int8 into a string
+func FormatInt8(value int8) string {
+ return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt16 turns an int16 into a string
+func FormatInt16(value int16) string {
+ return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt32 turns an int32 into a string
+func FormatInt32(value int32) string {
+ return strconv.Itoa(int(value))
+}
+
+// FormatInt64 turns an int64 into a string
+func FormatInt64(value int64) string {
+ return strconv.FormatInt(value, 10)
+}
+
+// FormatUint8 turns an uint8 into a string
+func FormatUint8(value uint8) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint16 turns an uint16 into a string
+func FormatUint16(value uint16) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint32 turns an uint32 into a string
+func FormatUint32(value uint32) string {
+ return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint64 turns an uint64 into a string
+func FormatUint64(value uint64) string {
+ return strconv.FormatUint(value, 10)
+}
diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go
new file mode 100644
index 00000000..c49cc473
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert_types.go
@@ -0,0 +1,730 @@
+package swag
+
+import "time"
+
+// This file was taken from the aws go sdk
+
+// String returns a pointer to of the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to of the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to of the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int32 returns a pointer to of the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+ dst := make([]*int32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+ dst := make([]int32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+ dst := make(map[string]*int32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+ dst := make(map[string]int32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to of the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint16 returns a pointer to of the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return &v
+}
+
+// Uint16Value returns the value of the uint16 pointer passed in or
+// 0 if the pointer is nil.
+func Uint16Value(v *uint16) uint16 {
+ if v != nil {
+ return *v
+ }
+
+ return 0
+}
+
+// Uint16Slice converts a slice of uint16 values into a slice of
+// uint16 pointers
+func Uint16Slice(src []uint16) []*uint16 {
+ dst := make([]*uint16, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+
+ return dst
+}
+
+// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
+// uint16 values
+func Uint16ValueSlice(src []*uint16) []uint16 {
+ dst := make([]uint16, len(src))
+
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+
+ return dst
+}
+
+// Uint16Map converts a string map of uint16 values into a string
+// map of uint16 pointers
+func Uint16Map(src map[string]uint16) map[string]*uint16 {
+ dst := make(map[string]*uint16)
+
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+
+ return dst
+}
+
+// Uint16ValueMap converts a string map of uint16 pointers into a string
+// map of uint16 values
+func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
+ dst := make(map[string]uint16)
+
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+
+ return dst
+}
+
+// Uint returns a pointer to of the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+ dst := make([]*uint, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+ dst := make([]uint, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+ dst := make(map[string]*uint)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+ dst := make(map[string]uint)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+ dst := make([]*uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+ dst := make([]uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+ dst := make(map[string]*uint32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+ dst := make(map[string]uint32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+ dst := make([]*uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+ dst := make([]uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+ dst := make(map[string]*uint64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+ dst := make(map[string]uint64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float32 returns a pointer to the float32 value passed in.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float32Value returns the value of the float32 pointer passed in or
+// 0 if the pointer is nil.
+func Float32Value(v *float32) float32 {
+ if v != nil {
+ return *v
+ }
+
+ return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+ dst := make([]*float32, len(src))
+
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+
+ return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+ dst := make([]float32, len(src))
+
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+
+ return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+ dst := make(map[string]*float32)
+
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+
+ return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+ dst := make(map[string]float32)
+
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go
new file mode 100644
index 00000000..55094cb7
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/doc.go
@@ -0,0 +1,31 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package swag contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+ - convert between value and pointers for builtin types
+ - convert from string to builtin types (wraps strconv)
+ - fast json concatenation
+ - search in path
+ - load from file or http
+ - name mangling
+
+This repo has only a few dependencies outside of the standard library:
+
+ - YAML utilities depend on gopkg.in/yaml.v2
+*/
+package swag
diff --git a/vendor/github.com/go-openapi/swag/errors.go b/vendor/github.com/go-openapi/swag/errors.go
new file mode 100644
index 00000000..6c67fbf9
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/errors.go
@@ -0,0 +1,15 @@
+package swag
+
+type swagError string
+
+const (
+ // ErrYAML is an error raised by YAML utilities
+ ErrYAML swagError = "yaml error"
+
+ // ErrLoader is an error raised by the file loader utility
+ ErrLoader swagError = "loader error"
+)
+
+func (e swagError) Error() string {
+ return string(e)
+}
diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go
new file mode 100644
index 00000000..16accc55
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/file.go
@@ -0,0 +1,33 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import "mime/multipart"
+
+// File represents an uploaded file.
+type File struct {
+ Data multipart.File
+ Header *multipart.FileHeader
+}
+
+// Read bytes from the file
+func (f *File) Read(p []byte) (n int, err error) {
+ return f.Data.Read(p)
+}
+
+// Close the file
+func (f *File) Close() error {
+ return f.Data.Close()
+}
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 00000000..20a359bb
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms adds additional initialisms
+func AddInitialisms(words ...string) {
+	for _, word := range words {
+		// commonInitialisms[upper(word)] = true
+		commonInitialisms.add(upper(word))
+	}
+	// sort again
+	initialisms = commonInitialisms.sorted()
+	initialismsRunes = asRunes(initialisms)
+	initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
new file mode 100644
index 00000000..c7caa990
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -0,0 +1,313 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// nullJSON represents a JSON object with null type
+var nullJSON = []byte("null")
+
+// DefaultJSONNameProvider the default cache for types
+var DefaultJSONNameProvider = NewNameProvider()
+
+const comma = byte(',')
+
+var closers map[byte]byte
+
+func init() {
+ closers = map[byte]byte{
+ '{': '}',
+ '[': ']',
+ }
+}
+
+type ejMarshaler interface {
+ MarshalEasyJSON(w *jwriter.Writer)
+}
+
+type ejUnmarshaler interface {
+ UnmarshalEasyJSON(w *jlexer.Lexer)
+}
+
+// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler
+// so it takes the fastest option available.
+func WriteJSON(data interface{}) ([]byte, error) {
+ if d, ok := data.(ejMarshaler); ok {
+ jw := new(jwriter.Writer)
+ d.MarshalEasyJSON(jw)
+ return jw.BuildBytes()
+ }
+ if d, ok := data.(json.Marshaler); ok {
+ return d.MarshalJSON()
+ }
+ return json.Marshal(data)
+}
+
+// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler
+// so it takes the fastest option available
+func ReadJSON(data []byte, value interface{}) error {
+ trimmedData := bytes.Trim(data, "\x00")
+ if d, ok := value.(ejUnmarshaler); ok {
+ jl := &jlexer.Lexer{Data: trimmedData}
+ d.UnmarshalEasyJSON(jl)
+ return jl.Error()
+ }
+ if d, ok := value.(json.Unmarshaler); ok {
+ return d.UnmarshalJSON(trimmedData)
+ }
+ return json.Unmarshal(trimmedData, value)
+}
+
+// DynamicJSONToStruct converts an untyped json structure into a struct
+func DynamicJSONToStruct(data interface{}, target interface{}) error {
+ // TODO: convert straight to a json typed map (mergo + iterate?)
+ b, err := WriteJSON(data)
+ if err != nil {
+ return err
+ }
+ return ReadJSON(b, target)
+}
+
+// ConcatJSON concatenates multiple json objects efficiently
+func ConcatJSON(blobs ...[]byte) []byte {
+ if len(blobs) == 0 {
+ return nil
+ }
+
+ last := len(blobs) - 1
+ for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) {
+ // strips trailing null objects
+ last--
+ if last < 0 {
+ // there was nothing but "null"s or nil...
+ return nil
+ }
+ }
+ if last == 0 {
+ return blobs[0]
+ }
+
+ var opening, closing byte
+ var idx, a int
+ buf := bytes.NewBuffer(nil)
+
+ for i, b := range blobs[:last+1] {
+ if b == nil || bytes.Equal(b, nullJSON) {
+ // a null object is in the list: skip it
+ continue
+ }
+ if len(b) > 0 && opening == 0 { // is this an array or an object?
+ opening, closing = b[0], closers[b[0]]
+ }
+
+ if opening != '{' && opening != '[' {
+ continue // don't know how to concatenate non container objects
+ }
+
+ const minLengthIfNotEmpty = 3
+ if len(b) < minLengthIfNotEmpty { // yep empty but also the last one, so closing this thing
+ if i == last && a > 0 {
+ if err := buf.WriteByte(closing); err != nil {
+ log.Println(err)
+ }
+ }
+ continue
+ }
+
+ idx = 0
+ if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
+ if err := buf.WriteByte(comma); err != nil {
+ log.Println(err)
+ }
+ idx = 1 // this is not the first or the last so we want to drop the leading bracket
+ }
+
+ if i != last { // not the last one, strip brackets
+ if _, err := buf.Write(b[idx : len(b)-1]); err != nil {
+ log.Println(err)
+ }
+ } else { // last one, strip only the leading bracket
+ if _, err := buf.Write(b[idx:]); err != nil {
+ log.Println(err)
+ }
+ }
+ a++
+ }
+ // somehow it ended up being empty, so provide a default value
+ if buf.Len() == 0 {
+ if err := buf.WriteByte(opening); err != nil {
+ log.Println(err)
+ }
+ if err := buf.WriteByte(closing); err != nil {
+ log.Println(err)
+ }
+ }
+ return buf.Bytes()
+}
+
+// ToDynamicJSON turns an object into a properly JSON typed structure
+func ToDynamicJSON(data interface{}) interface{} {
+ // TODO: convert straight to a json typed map (mergo + iterate?)
+ b, err := json.Marshal(data)
+ if err != nil {
+ log.Println(err)
+ }
+ var res interface{}
+ if err := json.Unmarshal(b, &res); err != nil {
+ log.Println(err)
+ }
+ return res
+}
+
+// FromDynamicJSON turns an object into a properly JSON typed structure
+func FromDynamicJSON(data, target interface{}) error {
+ b, err := json.Marshal(data)
+ if err != nil {
+ log.Println(err)
+ }
+ return json.Unmarshal(b, target)
+}
+
+// NameProvider represents an object capable of translating from go property names
+// to json property names
+// This type is thread-safe.
+type NameProvider struct {
+ lock *sync.Mutex
+ index map[reflect.Type]nameIndex
+}
+
+type nameIndex struct {
+ jsonNames map[string]string
+ goNames map[string]string
+}
+
+// NewNameProvider creates a new name provider
+func NewNameProvider() *NameProvider {
+ return &NameProvider{
+ lock: &sync.Mutex{},
+ index: make(map[reflect.Type]nameIndex),
+ }
+}
+
+func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) {
+ for i := 0; i < tpe.NumField(); i++ {
+ targetDes := tpe.Field(i)
+
+ if targetDes.PkgPath != "" { // unexported
+ continue
+ }
+
+ if targetDes.Anonymous { // walk embedded structures tree down first
+ buildnameIndex(targetDes.Type, idx, reverseIdx)
+ continue
+ }
+
+ if tag := targetDes.Tag.Get("json"); tag != "" {
+
+ parts := strings.Split(tag, ",")
+ if len(parts) == 0 {
+ continue
+ }
+
+ nm := parts[0]
+ if nm == "-" {
+ continue
+ }
+ if nm == "" { // empty string means we want to use the Go name
+ nm = targetDes.Name
+ }
+
+ idx[nm] = targetDes.Name
+ reverseIdx[targetDes.Name] = nm
+ }
+ }
+}
+
+func newNameIndex(tpe reflect.Type) nameIndex {
+ var idx = make(map[string]string, tpe.NumField())
+ var reverseIdx = make(map[string]string, tpe.NumField())
+
+ buildnameIndex(tpe, idx, reverseIdx)
+ return nameIndex{jsonNames: idx, goNames: reverseIdx}
+}
+
+// GetJSONNames gets all the json property names for a type
+func (n *NameProvider) GetJSONNames(subject interface{}) []string {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+
+ res := make([]string, 0, len(names.jsonNames))
+ for k := range names.jsonNames {
+ res = append(res, k)
+ }
+ return res
+}
+
+// GetJSONName gets the json name for a go property name
+func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) {
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ return n.GetJSONNameForType(tpe, name)
+}
+
+// GetJSONNameForType gets the json name for a go property name on a given type
+func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+ nme, ok := names.goNames[name]
+ return nme, ok
+}
+
+func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
+ names := newNameIndex(tpe)
+ n.index[tpe] = names
+ return names
+}
+
+// GetGoName gets the go name for a json property name
+func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) {
+ tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+ return n.GetGoNameForType(tpe, name)
+}
+
+// GetGoNameForType gets the go name for a given type for a json property name
+func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ names, ok := n.index[tpe]
+ if !ok {
+ names = n.makeNameIndex(tpe)
+ }
+ nme, ok := names.jsonNames[name]
+ return nme, ok
+}
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
new file mode 100644
index 00000000..658a24b7
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -0,0 +1,176 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// LoadHTTPTimeout the default timeout for load requests
+var LoadHTTPTimeout = 30 * time.Second
+
+// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth
+var LoadHTTPBasicAuthUsername = ""
+
+// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth
+var LoadHTTPBasicAuthPassword = ""
+
+// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests
+var LoadHTTPCustomHeaders = map[string]string{}
+
+// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
+}
+
+// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
+// timeout arg allows for per request overriding of the request timeout
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
+}
+
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file" becomes "/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
+ return remote
+ }
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
+ if err != nil {
+ return nil, err
+ }
+
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
+ }
+ }
+
+ return local(filepath.FromSlash(upth))
+ }
+}
+
+func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
+ return func(path string) ([]byte, error) {
+ client := &http.Client{Timeout: timeout}
+ req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx
+ if err != nil {
+ return nil, err
+ }
+
+ if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" {
+ req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword)
+ }
+
+ for key, val := range LoadHTTPCustomHeaders {
+ req.Header.Set(key, val)
+ }
+
+ resp, err := client.Do(req)
+ defer func() {
+ if resp != nil {
+ if e := resp.Body.Close(); e != nil {
+ log.Println(e)
+ }
+ }
+ }()
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("could not access document at %q [%s]: %w", path, resp.Status, ErrLoader)
+ }
+
+ return io.ReadAll(resp.Body)
+ }
+}
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
new file mode 100644
index 00000000..8bb64ac3
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -0,0 +1,93 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type (
+ lexemKind uint8
+
+ nameLexem struct {
+ original string
+ matchedInitialism string
+ kind lexemKind
+ }
+)
+
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
+)
+
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
+ original: original,
+ matchedInitialism: matchedInitialism,
+ }
+}
+
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
+ original: original,
+ }
+}
+
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
+
+ for i, orig := range l.original {
+ if i == 0 {
+ first = orig
+ continue
+ }
+
+ if i > 0 {
+ rest = l.original[i:]
+ break
+ }
+ }
+
+ if len(l.original) > 1 {
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
+ }
+
+ return l.original
+}
+
+func (l nameLexem) GetOriginal() string {
+ return l.original
+}
+
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
+}
diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go
new file mode 100644
index 00000000..821235f8
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/net.go
@@ -0,0 +1,38 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "net"
+ "strconv"
+)
+
+// SplitHostPort splits a network address into a host and a port.
+// The port is -1 when there is no port to be found.
+//
+// It wraps net.SplitHostPort and additionally converts the port to an int,
+// returning (\"\", -1, err) on any failure.
+func SplitHostPort(addr string) (host string, port int, err error) {
+ h, p, err := net.SplitHostPort(addr)
+ if err != nil {
+ return "", -1, err
+ }
+ // net.SplitHostPort may succeed with an empty port (e.g. "host:");
+ // treat that as a missing port
+ if p == "" {
+ return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr}
+ }
+
+ pi, err := strconv.Atoi(p)
+ if err != nil {
+ return "", -1, err
+ }
+ return h, pi, nil
+}
diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go
new file mode 100644
index 00000000..941bd017
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/path.go
@@ -0,0 +1,59 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+const (
+ // GOPATHKey represents the env key for gopath
+ GOPATHKey = "GOPATH"
+)
+
+// FindInSearchPath finds a package in a provided lists of paths.
+//
+// searchPath is a list-separated string (filepath.SplitList); the package is
+// looked up under each entry's "src" directory. Returns the resolved
+// (symlink-evaluated) path, or "" when not found.
+func FindInSearchPath(searchPath, pkg string) string {
+ pathsList := filepath.SplitList(searchPath)
+ for _, path := range pathsList {
+ if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
+ if _, err := os.Stat(evaluatedPath); err == nil {
+ return evaluatedPath
+ }
+ }
+ }
+ return ""
+}
+
+// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT
+func FindInGoSearchPath(pkg string) string {
+ return FindInSearchPath(FullGoSearchPath(), pkg)
+}
+
+// FullGoSearchPath gets the search paths for finding packages
+func FullGoSearchPath() string {
+ allPaths := os.Getenv(GOPATHKey)
+ if allPaths == "" {
+ // default GOPATH is $HOME/go
+ allPaths = filepath.Join(os.Getenv("HOME"), "go")
+ }
+ // NOTE(review): after the default above, allPaths is only empty when HOME
+ // is unset; in that case only GOROOT is returned
+ if allPaths != "" {
+ allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
+ } else {
+ allPaths = runtime.GOROOT()
+ }
+ return allPaths
+}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
new file mode 100644
index 00000000..274727a8
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -0,0 +1,508 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "bytes"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+type (
+ // splitter breaks an identifier into lexems, recognizing configured initialisms.
+ splitter struct {
+ initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ // splitterOption mutates a splitter at construction/borrow time.
+ splitterOption func(*splitter)
+
+ // initialismMatch tracks a (possibly partial) initialism match over the
+ // rune positions [start, end] of the scanned name.
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
+ }
+
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
+)
+
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ // poolOfBuffers recycles bytes.Buffer instances used to assemble words.
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ // poolOfLexems recycles the lexem slices returned by splitter.split.
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ // poolOfSplitters recycles splitter values; options are reset on borrow.
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+// The second return value is false when the rune has no replacement;
+// '-' and '_' map to the empty string (treated as separators).
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
+func split(str string) []string {
+ // borrow a pooled splitter (default options) and copy the lexems'
+ // original fragments out before redeeming the pooled objects
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
+
+ for _, lexem := range *lexems {
+ result = append(result, lexem.GetOriginal())
+ }
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
+
+ return result
+
+}
+
+// newSplitter constructs a splitter over the package-level initialism tables,
+// then applies the given options.
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
+ postSplitInitialismCheck: false,
+ initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
+ }
+
+ for _, option := range options {
+ option(&s)
+ }
+
+ return s
+}
+
+// withPostSplitInitialismCheck allows to catch initialisms after main split process
+func withPostSplitInitialismCheck(s *splitter) {
+ s.postSplitInitialismCheck = true
+}
+
+// BorrowMatches takes a match slice from the pool, emptied but with its
+// capacity retained.
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+// BorrowBuffer takes a reset buffer from the pool, grown to at least size bytes.
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
+ }
+
+ return s
+}
+
+// BorrowLexems takes a lexem slice from the pool, emptied but with its
+// capacity retained.
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+// BorrowSplitter takes a splitter from the pool, with options reset to the
+// defaults before the given options are applied.
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+// RedeemMatches returns a borrowed match slice to the pool.
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+// RedeemBuffer returns a borrowed buffer to the pool.
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+// RedeemLexems returns a borrowed lexem slice to the pool.
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+// RedeemSplitter returns a borrowed splitter to the pool.
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+// isZero reports whether the match still has its zero-value bounds,
+// i.e. no match has been recorded.
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+// split breaks name into lexems. The returned slice is pooled: the caller
+// must redeem it with poolOfLexems.RedeemLexems.
+func (s splitter) split(name string) *[]nameLexem {
+ nameRunes := []rune(name)
+ matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ // empty input: return an empty (pooled) lexem slice
+ return poolOfLexems.BorrowLexems()
+ }
+
+ return s.mapMatchesToNameLexems(nameRunes, matches)
+}
+
+// gatherInitialismMatches scans nameRunes once and returns every initialism
+// match (complete or in progress) found. The result is pooled and must be
+// redeemed by the caller; it is nil for empty input.
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
+
+ for currentRunePosition, currentRune := range nameRunes {
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of o(n).
+ newMatches := poolOfMatches.BorrowMatches()
+
+ // check current initialism matches
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
+
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
+
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
+ }
+
+ match.complete = true
+ match.end = currentRunePosition
+ }
+
+ *newMatches = append(*newMatches, match)
+ }
+ }
+
+ // check for new initialism matches
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
+ start: currentRunePosition,
+ body: initialismRunes,
+ complete: false,
+ })
+ }
+ }
+
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
+ matches = newMatches
+ }
+
+ // up to the caller to redeem this last slice
+ return matches
+}
+
+// mapMatchesToNameLexems converts complete initialism matches into lexems,
+// breaking the unmatched gaps between them into casual-word lexems.
+// matches is redeemed here; the returned lexem slice is pooled and must be
+// redeemed by the caller.
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
+
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
+ if !match.complete {
+ continue
+ }
+
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ // everything before the first accepted match is casual text
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
+
+ lastAcceptedMatch = match
+
+ continue
+ }
+
+ // ignore matches overlapping the previously accepted one
+ if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch {
+ continue
+ }
+
+ middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
+
+ lastAcceptedMatch = match
+ }
+
+ // we have not found any accepted matches
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
+ // trailing casual text after the last accepted match
+ rest := nameRunes[lastAcceptedMatch.end+1:]
+ s.appendBrokenDownCasualString(nameLexems, rest)
+ }
+
+ poolOfMatches.RedeemMatches(matches)
+
+ return nameLexems
+}
+
+// breakInitialism wraps a matched initialism as an initialism lexem.
+func (s splitter) breakInitialism(original string) nameLexem {
+ return newInitialismNameLexem(original, original)
+}
+
+// appendBrokenDownCasualString splits a run of casual (non-initialism) runes
+// into word lexems and appends them to segments. Words are cut on special
+// characters (see nameReplaceTable), on non-letter/digit runes, and on
+// upper-case boundaries. With postSplitInitialismCheck enabled, each word is
+// re-checked against the initialism table.
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
+
+ addCasualNameLexem := func(original string) {
+ *segments = append(*segments, newCasualNameLexem(original))
+ }
+
+ addInitialismNameLexem := func(original, match string) {
+ *segments = append(*segments, newInitialismNameLexem(original, match))
+ }
+
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ // promote the word to an initialism lexem when it matches
+ // (ignoring case and surrounding blanks)
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
+ return
+ }
+ }
+
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
+ }
+
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ // special character: flush the current word, then emit the
+ // replacement word (if any)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
+ }
+
+ if replace != "" {
+ addNameLexem(replace)
+ }
+
+ continue
+ }
+
+ // any rune that is not a letter, mark, number or connector
+ // punctuation acts as a plain separator
+ if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
+ }
+
+ continue
+ }
+
+ // an upper-case rune starts a new word
+ if unicode.IsUpper(rn) {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ }
+ currentSegment.Reset()
+ }
+
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ }
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ // 1. skip leading blanks in str
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ // str was all blanks: equal only to an empty base
+ return len(base) == 0
+ }
+
+ // 2. compare base runes against str, folding str to upper case
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ // str ran out before consuming all of base
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
+
+ i += size
+ }
+
+ return true
+}
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 00000000..90745d5c
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+// The returned slice aliases the string's storage and must never be mutated.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
new file mode 100644
index 00000000..5051401c
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -0,0 +1,364 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// GoNamePrefixFunc sets an optional rule to prefix go names
+// which do not start with a letter.
+//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
+// e.g. to help convert "123" into "{prefix}123"
+//
+// The default is to prefix with "X"
+var GoNamePrefixFunc func(string) string
+
+// prefixFunc prepends the configured (or default "X") prefix to in,
+// computing the prefix from the full original name.
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
+ }
+
+ return GoNamePrefixFunc(name) + in
+}
+
+const (
+ // collectionFormatComma = "csv"
+ collectionFormatSpace = "ssv"
+ collectionFormatTab = "tsv"
+ collectionFormatPipe = "pipes"
+ collectionFormatMulti = "multi"
+)
+
+// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute):
+//
+// ssv: space separated value
+// tsv: tab separated value
+// pipes: pipe (|) separated value
+// csv: comma separated value (default)
+//
+// "multi" (and empty input) returns data unchanged; otherwise the result is
+// a single-element slice holding the joined string.
+func JoinByFormat(data []string, format string) []string {
+ if len(data) == 0 {
+ return data
+ }
+ var sep string
+ switch format {
+ case collectionFormatSpace:
+ sep = " "
+ case collectionFormatTab:
+ sep = "\t"
+ case collectionFormatPipe:
+ sep = "|"
+ case collectionFormatMulti:
+ return data
+ default:
+ sep = ","
+ }
+ return []string{strings.Join(data, sep)}
+}
+
+// SplitByFormat splits a string by a known format:
+//
+// ssv: space separated value
+// tsv: tab separated value
+// pipes: pipe (|) separated value
+// csv: comma separated value (default)
+//
+// Elements are trimmed; blank elements are dropped. Empty input and the
+// "multi" format return nil.
+func SplitByFormat(data, format string) []string {
+ if data == "" {
+ return nil
+ }
+ var sep string
+ switch format {
+ case collectionFormatSpace:
+ sep = " "
+ case collectionFormatTab:
+ sep = "\t"
+ case collectionFormatPipe:
+ sep = "|"
+ case collectionFormatMulti:
+ return nil
+ default:
+ sep = ","
+ }
+ var result []string
+ for _, s := range strings.Split(data, sep) {
+ if ts := strings.TrimSpace(s); ts != "" {
+ result = append(result, ts)
+ }
+ }
+ return result
+}
+
+// trim removes leading and trailing whitespace (strings.TrimSpace)
+func trim(str string) string {
+ return strings.TrimSpace(str)
+}
+
+// upper trims the string, then upper-cases it
+func upper(str string) string {
+ return strings.ToUpper(trim(str))
+}
+
+// lower trims the string, then lower-cases it
+func lower(str string) string {
+ return strings.ToLower(trim(str))
+}
+
+// Camelize an uppercased word: the first rune is upper-cased and all
+// subsequent runes are lower-cased.
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
+ for pos, ru := range []rune(word) {
+ if pos > 0 {
+ camelized.WriteRune(unicode.ToLower(ru))
+ } else {
+ camelized.WriteRune(unicode.ToUpper(ru))
+ }
+ }
+ return camelized.String()
+}
+
+// ToFileName lowercases and underscores a go type name
+// (e.g. "SomeTypeName" -> "some_type_name").
+func ToFileName(name string) string {
+ in := split(name)
+ out := make([]string, 0, len(in))
+
+ for _, w := range in {
+ out = append(out, lower(w))
+ }
+
+ return strings.Join(out, "_")
+}
+
+// ToCommandName lowercases and hyphenates a go type name
+// (e.g. "SomeTypeName" -> "some-type-name").
+func ToCommandName(name string) string {
+ in := split(name)
+ out := make([]string, 0, len(in))
+
+ for _, w := range in {
+ out = append(out, lower(w))
+ }
+ return strings.Join(out, "-")
+}
+
+// ToHumanNameLower represents a code name as a human series of words.
+// Casual words are lower-cased; recognized initialisms keep their casing.
+func ToHumanNameLower(name string) string {
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
+
+ for _, w := range *in {
+ if !w.IsInitialism() {
+ out = append(out, lower(w.GetOriginal()))
+ } else {
+ out = append(out, trim(w.GetOriginal()))
+ }
+ }
+ poolOfLexems.RedeemLexems(in)
+
+ return strings.Join(out, " ")
+}
+
+// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
+func ToHumanNameTitle(name string) string {
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
+ if !w.IsInitialism() {
+ out = append(out, Camelize(original))
+ } else {
+ out = append(out, original)
+ }
+ }
+ poolOfLexems.RedeemLexems(in)
+
+ return strings.Join(out, " ")
+}
+
+// ToJSONName camelcases a name which can be underscored or pascal cased:
+// the first word is lower-cased, subsequent words are Camelized.
+func ToJSONName(name string) string {
+ in := split(name)
+ out := make([]string, 0, len(in))
+
+ for i, w := range in {
+ if i == 0 {
+ out = append(out, lower(w))
+ continue
+ }
+ out = append(out, Camelize(trim(w)))
+ }
+ return strings.Join(out, "")
+}
+
+// ToVarName camelcases a name which can be underscored or pascal cased,
+// lower-casing the leading segment of the ToGoName result.
+// NOTE(review): isInitialism is defined elsewhere in this package.
+func ToVarName(name string) string {
+ res := ToGoName(name)
+ if isInitialism(res) {
+ return lower(res)
+ }
+ if len(res) <= 1 {
+ return lower(res)
+ }
+ return lower(res[:1]) + res[1:]
+}
+
+// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
+//
+// Names that do not start with a letter get the configured prefix
+// (see GoNamePrefixFunc; default "X").
+func ToGoName(name string) string {
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
+
+ for _, lexem := range lexemes[1:] {
+ goName := lexem.GetUnsafeGoName()
+
+ // to support old behavior
+ if lexem.IsInitialism() {
+ goName = upper(goName)
+ }
+ result.WriteString(goName)
+ }
+
+ return result.String()
+}
+
+// ContainsStrings searches a slice of strings for a case-sensitive match
+func ContainsStrings(coll []string, item string) bool {
+ for _, a := range coll {
+ if a == item {
+ return true
+ }
+ }
+ return false
+}
+
+// ContainsStringsCI searches a slice of strings for a case-insensitive match
+func ContainsStringsCI(coll []string, item string) bool {
+ for _, a := range coll {
+ if strings.EqualFold(a, item) {
+ return true
+ }
+ }
+ return false
+}
+
+// zeroable is implemented by types that can report their own zero value
+// (e.g. time.Time); used by IsZero below.
+type zeroable interface {
+ IsZero() bool
+}
+
+// IsZero returns true when the value passed into the function is a zero value.
+// This allows for safer checking of interface values.
+//
+// Precedence: nil-able kinds are checked for nil first, then a zeroable
+// implementation is honored, then reflection by kind.
+func IsZero(data interface{}) bool {
+ v := reflect.ValueOf(data)
+ // check for nil data
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ if v.IsNil() {
+ return true
+ }
+ }
+
+ // check for things that have an IsZero method instead
+ if vv, ok := data.(zeroable); ok {
+ return vv.IsZero()
+ }
+
+ // continue with slightly more complex reflection
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Struct, reflect.Array:
+ return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
+ case reflect.Invalid:
+ return true
+ default:
+ return false
+ }
+}
+
+// CommandLineOptionsGroup represents a group of user-defined command line options
+type CommandLineOptionsGroup struct {
+ ShortDescription string
+ LongDescription string
+ Options interface{}
+}
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
new file mode 100644
index 00000000..57534653
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -0,0 +1,481 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+ yaml "gopkg.in/yaml.v3"
+)
+
+// YAMLMatcher matches yaml file extensions (.yaml, .yml)
+func YAMLMatcher(path string) bool {
+ ext := filepath.Ext(path)
+ return ext == ".yaml" || ext == ".yml"
+}
+
+// YAMLToJSON converts YAML unmarshaled data into json compatible data
+// (ordered maps, JSON-safe scalars), serialized with WriteJSON.
+func YAMLToJSON(data interface{}) (json.RawMessage, error) {
+ jm, err := transformData(data)
+ if err != nil {
+ return nil, err
+ }
+ b, err := WriteJSON(jm)
+ return json.RawMessage(b), err
+}
+
+// BytesToYAMLDoc converts a byte slice into a YAML document.
+// Only documents whose root is a mapping are accepted; the returned value
+// is a *yaml.Node so that key order is preserved.
+func BytesToYAMLDoc(data []byte) (interface{}, error) {
+ var document yaml.Node // preserve order that is present in the document
+ if err := yaml.Unmarshal(data, &document); err != nil {
+ return nil, err
+ }
+ if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
+ return nil, fmt.Errorf("only YAML documents that are objects are supported: %w", ErrYAML)
+ }
+ return &document, nil
+}
+
+// yamlNode converts a yaml.Node into a JSON-compatible Go value,
+// dispatching on the node kind. Aliases are resolved to their anchor.
+func yamlNode(root *yaml.Node) (interface{}, error) {
+ switch root.Kind {
+ case yaml.DocumentNode:
+ return yamlDocument(root)
+ case yaml.SequenceNode:
+ return yamlSequence(root)
+ case yaml.MappingNode:
+ return yamlMapping(root)
+ case yaml.ScalarNode:
+ return yamlScalar(root)
+ case yaml.AliasNode:
+ return yamlNode(root.Alias)
+ default:
+ return nil, fmt.Errorf("unsupported YAML node type: %v: %w", root.Kind, ErrYAML)
+ }
+}
+
+// yamlDocument unwraps a document node's single root content node.
+func yamlDocument(node *yaml.Node) (interface{}, error) {
+ if len(node.Content) != 1 {
+ return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML)
+ }
+ return yamlNode(node.Content[0])
+}
+
+// yamlMapping converts a mapping node into a JSONMapSlice, preserving key
+// order. node.Content alternates key/value nodes, hence the divide-by-two
+// allocation.
+func yamlMapping(node *yaml.Node) (interface{}, error) {
+ const sensibleAllocDivider = 2
+ m := make(JSONMapSlice, len(node.Content)/sensibleAllocDivider)
+
+ var j int
+ for i := 0; i < len(node.Content); i += 2 {
+ var nmi JSONMapItem
+ k, err := yamlStringScalarC(node.Content[i])
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML)
+ }
+ nmi.Key = k
+ v, err := yamlNode(node.Content[i+1])
+ if err != nil {
+ return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML)
+ }
+ nmi.Value = v
+ m[j] = nmi
+ j++
+ }
+ return m, nil
+}
+
+// yamlSequence converts a sequence node into a []interface{}.
+func yamlSequence(node *yaml.Node) (interface{}, error) {
+ s := make([]interface{}, 0)
+
+ for i := 0; i < len(node.Content); i++ {
+
+ v, err := yamlNode(node.Content[i])
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML)
+ }
+ s = append(s, v)
+ }
+ return s, nil
+}
+
+const ( // See https://yaml.org/type/
+ yamlStringScalar = "tag:yaml.org,2002:str"
+ yamlIntScalar = "tag:yaml.org,2002:int"
+ yamlBoolScalar = "tag:yaml.org,2002:bool"
+ yamlFloatScalar = "tag:yaml.org,2002:float"
+ yamlTimestamp = "tag:yaml.org,2002:timestamp"
+ yamlNull = "tag:yaml.org,2002:null"
+)
+
+// yamlScalar converts a scalar node to the Go value implied by its resolved
+// YAML tag (string, bool, int64, float64, timestamp-as-string, or nil).
+func yamlScalar(node *yaml.Node) (interface{}, error) {
+ switch node.LongTag() {
+ case yamlStringScalar:
+ return node.Value, nil
+ case yamlBoolScalar:
+ b, err := strconv.ParseBool(node.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w: %w", node.Value, err, ErrYAML)
+ }
+ return b, nil
+ case yamlIntScalar:
+ i, err := strconv.ParseInt(node.Value, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w: %w", node.Value, err, ErrYAML)
+ }
+ return i, nil
+ case yamlFloatScalar:
+ f, err := strconv.ParseFloat(node.Value, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w: %w", node.Value, err, ErrYAML)
+ }
+ return f, nil
+ case yamlTimestamp:
+ // timestamps are kept as their raw string representation
+ return node.Value, nil
+ case yamlNull:
+ return nil, nil //nolint:nilnil
+ default:
+ return nil, fmt.Errorf("YAML tag %q is not supported: %w", node.LongTag(), ErrYAML)
+ }
+}
+
+// yamlStringScalarC returns the string value of a scalar node used as a map
+// key; numeric keys are accepted and kept as strings.
+func yamlStringScalarC(node *yaml.Node) (string, error) {
+ if node.Kind != yaml.ScalarNode {
+ return "", fmt.Errorf("expecting a string scalar but got %q: %w", node.Kind, ErrYAML)
+ }
+ switch node.LongTag() {
+ case yamlStringScalar, yamlIntScalar, yamlFloatScalar:
+ return node.Value, nil
+ default:
+ return "", fmt.Errorf("YAML tag %q is not supported as map key: %w", node.LongTag(), ErrYAML)
+ }
+}
+
+// JSONMapSlice represent a JSON object, with the order of keys maintained
+type JSONMapSlice []JSONMapItem
+
+// MarshalJSON renders a JSONMapSlice as JSON
+func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
+ w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
+ s.MarshalEasyJSON(w)
+ return w.BuildBytes()
+}
+
+// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON
+func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) {
+ w.RawByte('{')
+
+ ln := len(s)
+ last := ln - 1
+ for i := 0; i < ln; i++ {
+ s[i].MarshalEasyJSON(w)
+ if i != last { // last item
+ w.RawByte(',')
+ }
+ }
+
+ w.RawByte('}')
+}
+
+// UnmarshalJSON makes a JSONMapSlice from JSON
+func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ s.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON
+func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ if in.IsNull() {
+ in.Skip()
+ return
+ }
+
+ var result JSONMapSlice
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ var mi JSONMapItem
+ mi.UnmarshalEasyJSON(in)
+ result = append(result, mi)
+ }
+ *s = result
+}
+
+// MarshalYAML renders the ordered map as a YAML document.
+// NOTE(review): it returns the result of yaml.Marshal (a []byte), not a
+// *yaml.Node — upstream vendored behavior; confirm against callers before
+// relying on the returned type.
+func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
+ var n yaml.Node
+ n.Kind = yaml.DocumentNode
+ var nodes []*yaml.Node
+ for _, item := range s {
+ nn, err := json2yaml(item.Value)
+ if err != nil {
+ return nil, err
+ }
+ // each entry contributes a key scalar followed by its value node
+ ns := []*yaml.Node{
+ {
+ Kind: yaml.ScalarNode,
+ Tag: yamlStringScalar,
+ Value: item.Key,
+ },
+ nn,
+ }
+ nodes = append(nodes, ns...)
+ }
+
+ n.Content = []*yaml.Node{
+ {
+ Kind: yaml.MappingNode,
+ Content: nodes,
+ },
+ }
+
+ return yaml.Marshal(&n)
+}
+
+// isNil reports whether input is nil, including typed-nil pointers, maps,
+// slices and channels hidden behind a non-nil interface.
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+// json2yaml converts a JSON-compatible Go value into a *yaml.Node tree.
+// JSONMapSlice keeps its key order; plain maps are emitted with sorted keys
+// for deterministic output. Unhandled types yield an error wrapping ErrYAML.
+func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
+ switch val := item.(type) {
+ case JSONMapSlice:
+ var n yaml.Node
+ n.Kind = yaml.MappingNode
+ for i := range val {
+ childNode, err := json2yaml(&val[i].Value)
+ if err != nil {
+ return nil, err
+ }
+ n.Content = append(n.Content, &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlStringScalar,
+ Value: val[i].Key,
+ }, childNode)
+ }
+ return &n, nil
+ case map[string]interface{}:
+ var n yaml.Node
+ n.Kind = yaml.MappingNode
+ // sort keys for deterministic output
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
+ childNode, err := json2yaml(v)
+ if err != nil {
+ return nil, err
+ }
+ n.Content = append(n.Content, &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlStringScalar,
+ Value: k,
+ }, childNode)
+ }
+ return &n, nil
+ case []interface{}:
+ var n yaml.Node
+ n.Kind = yaml.SequenceNode
+ for i := range val {
+ childNode, err := json2yaml(val[i])
+ if err != nil {
+ return nil, err
+ }
+ n.Content = append(n.Content, childNode)
+ }
+ return &n, nil
+ case string:
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlStringScalar,
+ Value: val,
+ }, nil
+ case float64:
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlFloatScalar,
+ Value: strconv.FormatFloat(val, 'f', -1, 64),
+ }, nil
+ case int64:
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlIntScalar,
+ Value: strconv.FormatInt(val, 10),
+ }, nil
+ case uint64:
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlIntScalar,
+ Value: strconv.FormatUint(val, 10),
+ }, nil
+ case bool:
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Tag: yamlBoolScalar,
+ Value: strconv.FormatBool(val),
+ }, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML)
+ }
+}
+
+// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
+type JSONMapItem struct {
+ Key string
+ Value interface{}
+}
+
+// MarshalJSON renders a JSONMapItem as JSON
+func (s JSONMapItem) MarshalJSON() ([]byte, error) {
+ w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
+ s.MarshalEasyJSON(w)
+ return w.BuildBytes()
+}
+
+// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON
+func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) {
+ w.String(s.Key)
+ w.RawByte(':')
+ w.Raw(WriteJSON(s.Value))
+}
+
+// UnmarshalJSON makes a JSONMapItem from JSON
+func (s *JSONMapItem) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ s.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON
+func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ key := in.UnsafeString()
+ in.WantColon()
+ value := in.Interface()
+ in.WantComma()
+ s.Key = key
+ s.Value = value
+}
+
+func transformData(input interface{}) (out interface{}, err error) {
+ format := func(t interface{}) (string, error) {
+ switch k := t.(type) {
+ case string:
+ return k, nil
+ case uint:
+ return strconv.FormatUint(uint64(k), 10), nil
+ case uint8:
+ return strconv.FormatUint(uint64(k), 10), nil
+ case uint16:
+ return strconv.FormatUint(uint64(k), 10), nil
+ case uint32:
+ return strconv.FormatUint(uint64(k), 10), nil
+ case uint64:
+ return strconv.FormatUint(k, 10), nil
+ case int:
+ return strconv.Itoa(k), nil
+ case int8:
+ return strconv.FormatInt(int64(k), 10), nil
+ case int16:
+ return strconv.FormatInt(int64(k), 10), nil
+ case int32:
+ return strconv.FormatInt(int64(k), 10), nil
+ case int64:
+ return strconv.FormatInt(k, 10), nil
+ default:
+ return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML)
+ }
+ }
+
+ switch in := input.(type) {
+ case yaml.Node:
+ return yamlNode(&in)
+ case *yaml.Node:
+ return yamlNode(in)
+ case map[interface{}]interface{}:
+ o := make(JSONMapSlice, 0, len(in))
+ for ke, va := range in {
+ var nmi JSONMapItem
+ if nmi.Key, err = format(ke); err != nil {
+ return nil, err
+ }
+
+ v, ert := transformData(va)
+ if ert != nil {
+ return nil, ert
+ }
+ nmi.Value = v
+ o = append(o, nmi)
+ }
+ return o, nil
+ case []interface{}:
+ len1 := len(in)
+ o := make([]interface{}, len1)
+ for i := 0; i < len1; i++ {
+ o[i], err = transformData(in[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return o, nil
+ }
+ return input, nil
+}
+
+// YAMLDoc loads a yaml document from either http or a file and converts it to json
+func YAMLDoc(path string) (json.RawMessage, error) {
+ yamlDoc, err := YAMLData(path)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := YAMLToJSON(yamlDoc)
+ if err != nil {
+ return nil, err
+ }
+
+ return data, nil
+}
+
+// YAMLData loads a yaml document from either http or a file
+func YAMLData(path string) (interface{}, error) {
+ data, err := LoadFromFileOrHTTP(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return BytesToYAMLDoc(data)
+}
diff --git a/vendor/github.com/go-openapi/validate/.editorconfig b/vendor/github.com/go-openapi/validate/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/validate/.gitattributes b/vendor/github.com/go-openapi/validate/.gitattributes
new file mode 100644
index 00000000..49ad5276
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/.gitattributes
@@ -0,0 +1,2 @@
+# gofmt always uses LF, whereas Git uses CRLF on Windows.
+*.go text eol=lf
diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore
new file mode 100644
index 00000000..fea8b84e
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/.gitignore
@@ -0,0 +1,5 @@
+secrets.yml
+coverage.out
+*.cov
+*.out
+playground
diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml
new file mode 100644
index 00000000..22f8d21c
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md
new file mode 100644
index 00000000..79cf6a07
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md
@@ -0,0 +1,31 @@
+# Benchmark
+
+Validating the Kubernetes Swagger API
+
+## v0.22.6: 60,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op
+```
+
+## After refactoring PR: minor but noticeable improvements: 25,000,000 allocs
+```
+go test -bench Spec
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op
+```
+
+## After reduce GC pressure PR: 17,000,000 allocs
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/validate
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op
+```
diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/validate/LICENSE b/vendor/github.com/go-openapi/validate/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md
new file mode 100644
index 00000000..e8e1bb21
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/README.md
@@ -0,0 +1,36 @@
+# Validation helpers [](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/validate)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/validate)
+[](https://goreportcard.com/report/github.com/go-openapi/validate)
+
+This package provides helpers to validate the Swagger 2.0 specification (aka OpenAPI 2.0).
+
+Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
+
+## What's inside?
+
+* A validator for Swagger specifications
+* A validator for JSON schemas draft4
+* Helper functions to validate individual values (used by code generated by [go-swagger](https://github.com/go-swagger/go-swagger)).
+ * Required, RequiredNumber, RequiredString
+ * ReadOnly
+ * UniqueItems, MaxItems, MinItems
+ * Enum, EnumCase
+ * Pattern, MinLength, MaxLength
+ * Minimum, Maximum, MultipleOf
+ * FormatOf
+
+[Documentation](https://pkg.go.dev/github.com/go-openapi/validate)
+
+## FAQ
+
+* Does this library support OpenAPI 3?
+
+> No.
+> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
+> There is no plan to make it evolve toward supporting OpenAPI 3.x.
+> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
+>
+> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
diff --git a/vendor/github.com/go-openapi/validate/context.go b/vendor/github.com/go-openapi/validate/context.go
new file mode 100644
index 00000000..89977173
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/context.go
@@ -0,0 +1,56 @@
+package validate
+
+import (
+ "context"
+)
+
+// validateCtxKey is the key type of context key in this pkg
+type validateCtxKey string
+
+const (
+ operationTypeKey validateCtxKey = "operationTypeKey"
+)
+
+type operationType string
+
+const (
+ request operationType = "request"
+ response operationType = "response"
+ none operationType = "none" // not specified in ctx
+)
+
+var operationTypeEnum = []operationType{request, response, none}
+
+// WithOperationRequest returns a new context with operationType request
+// in context value
+func WithOperationRequest(ctx context.Context) context.Context {
+ return withOperation(ctx, request)
+}
+
+// WithOperationResponse returns a new context with operationType response
+// in context value
+func WithOperationResponse(ctx context.Context) context.Context {
+ return withOperation(ctx, response)
+}
+
+func withOperation(ctx context.Context, operation operationType) context.Context {
+ return context.WithValue(ctx, operationTypeKey, operation)
+}
+
+// extractOperationType extracts the operation type from ctx
+// if not specified or of unknown value, return none operation type
+func extractOperationType(ctx context.Context) operationType {
+ v := ctx.Value(operationTypeKey)
+ if v == nil {
+ return none
+ }
+ res, ok := v.(operationType)
+ if !ok {
+ return none
+ }
+ // validate the value is in operation enum
+ if err := Enum("", "", res, operationTypeEnum); err != nil {
+ return none
+ }
+ return res
+}
diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go
new file mode 100644
index 00000000..8815fd93
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+var (
+ // Debug is true when the SWAGGER_DEBUG env var is not empty.
+ // It enables a more verbose logging of validators.
+ Debug = os.Getenv("SWAGGER_DEBUG") != ""
+ // validateLogger is a debug logger for this package
+ validateLogger *log.Logger
+)
+
+func init() {
+ debugOptions()
+}
+
+func debugOptions() {
+ validateLogger = log.New(os.Stdout, "validate:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...interface{}) {
+ // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+ if Debug {
+ _, file1, pos1, _ := runtime.Caller(1)
+ validateLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go
new file mode 100644
index 00000000..e0dd9383
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/default_validator.go
@@ -0,0 +1,304 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/go-openapi/spec"
+)
+
+// defaultValidator validates default values in a spec.
+// According to Swagger spec, default values MUST validate their schema.
+type defaultValidator struct {
+ SpecValidator *SpecValidator // parent spec validator: provides the spec, analyzer and known formats
+ visitedSchemas map[string]struct{} // guards recursive exploration against schema cycles
+ schemaOptions *SchemaValidatorOptions // options forwarded to nested schema/param/header/items validators
+}
+
+// resetVisited resets the internal state of visited schemas,
+// allocating the map lazily on first use.
+func (d *defaultValidator) resetVisited() {
+ if d.visitedSchemas == nil {
+ d.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+ // TODO(go1.21): clear(d.visitedSchemas)
+ for k := range d.visitedSchemas {
+ delete(d.visitedSchemas, k)
+ }
+}
+
+// isVisited reports whether a dotted schema path has already been explored.
+//
+// Besides exact matches, it detects overlapping paths: for every '.' split
+// of the path, if the parent portion ends with the trailing segment
+// (e.g. "a.b" and suffix "b" in "a.b.b"), the path is treated as visited.
+// This cuts short recursion into self-referencing schemas.
+func isVisited(path string, visitedSchemas map[string]struct{}) bool {
+ _, found := visitedSchemas[path]
+ if found {
+ return true
+ }
+
+ // search for overlapping paths
+ var (
+ parent string
+ suffix string
+ )
+ // scan right-to-left; start at len-2 so a trailing '.' is ignored
+ for i := len(path) - 2; i >= 0; i-- {
+ r := path[i]
+ if r != '.' {
+ continue
+ }
+
+ parent = path[0:i]
+ suffix = path[i+1:]
+
+ if strings.HasSuffix(parent, suffix) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// beingVisited marks a schema path as currently being visited.
+func (d *defaultValidator) beingVisited(path string) {
+ d.visitedSchemas[path] = struct{}{}
+}
+
+// isVisited tells if a path has already been visited
+// (including overlapping paths — see the package-level isVisited).
+func (d *defaultValidator) isVisited(path string) bool {
+ return isVisited(path, d.visitedSchemas)
+}
+
+// Validate validates the default values declared in the swagger spec.
+//
+// The returned Result is borrowed from the package pool; it is redeemed
+// when the caller merges it.
+func (d *defaultValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult() // will redeem when merged
+
+ if d == nil || d.SpecValidator == nil {
+ // nothing to validate: return the empty (pooled) result
+ return errs
+ }
+ d.resetVisited()
+ errs.Merge(d.validateDefaultValueValidAgainstSchema()) // error -
+ return errs
+}
+
+// validateDefaultValueValidAgainstSchema walks every operation (parameters
+// and responses) and every definition, checking that each declared default
+// value validates against its schema or inline (schema-less) definition.
+// Defaults on required parameters are reported as warnings.
+func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
+ // every default value that is specified must validate against the schema for that property
+ // headers, items, parameters, schema
+
+ res := pools.poolOfResults.BorrowResult() // will redeem when merged
+ s := d.SpecValidator
+
+ for method, pathItem := range s.expandedAnalyzer().Operations() {
+ for path, op := range pathItem {
+ // parameters
+ for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+ if param.Default != nil && param.Required {
+ res.AddWarnings(requiredHasDefaultMsg(param.Name, param.In))
+ }
+
+ // reset explored schemas to get depth-first recursive-proof exploration
+ d.resetVisited()
+
+ // Check simple parameters first
+ // default values provided must validate against their inline definition (no explicit schema)
+ if param.Default != nil && param.Schema == nil {
+ // check param default value is valid
+ red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ // clean result: return it to the pool since it won't be merged
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ // Recursively follows Items and Schemas
+ if param.Items != nil {
+ red := d.validateDefaultValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ if param.Schema != nil {
+ // Validate default value against schema
+ red := d.validateDefaultValueSchemaAgainstSchema(param.Name, param.In, param.Schema)
+ if red.HasErrorsOrWarnings() {
+ res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+ }
+
+ if op.Responses != nil {
+ if op.Responses.Default != nil {
+ // Same constraint on default Response
+ res.Merge(d.validateDefaultInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID))
+ }
+ // Same constraint on regular Responses
+ if op.Responses.StatusCodeResponses != nil { // Safeguard
+ for code, r := range op.Responses.StatusCodeResponses {
+ res.Merge(d.validateDefaultInResponse(&r, "response", path, code, op.ID)) //#nosec
+ }
+ }
+ } else if op.ID != "" {
+ // Empty op.ID means there is no meaningful operation: no need to report a specific message
+ res.AddErrors(noValidResponseMsg(op.ID))
+ }
+ }
+ }
+ if s.spec.Spec().Definitions != nil { // Safeguard
+ // reset explored schemas to get depth-first recursive-proof exploration
+ d.resetVisited()
+ for nm, sch := range s.spec.Spec().Definitions {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
+ }
+ }
+ return res
+}
+
+// validateDefaultInResponse checks defaults declared in a single response:
+// header defaults (inline definition and items) and the response schema's
+// default. The response $ref, if any, is expanded first; header patterns
+// are also verified to compile.
+func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result {
+ s := d.SpecValidator
+
+ response, res := responseHelp.expandResponseRef(resp, path, s)
+ if !res.IsValid() {
+ // could not expand the $ref: report and stop here
+ return res
+ }
+
+ responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
+
+ if response.Headers != nil { // Safeguard
+ for nm, h := range response.Headers {
+ // reset explored schemas to get depth-first recursive-proof exploration
+ d.resetVisited()
+
+ if h.Default != nil {
+ red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ // Headers have inline definition, like params
+ if h.Items != nil {
+ red := d.validateDefaultValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ if _, err := compileRegexp(h.Pattern); err != nil {
+ res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err))
+ }
+
+ // Headers don't have schema
+ }
+ }
+ if response.Schema != nil {
+ // reset explored schemas to get depth-first recursive-proof exploration
+ d.resetVisited()
+
+ red := d.validateDefaultValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema)
+ if red.HasErrorsOrWarnings() {
+ // Additional message to make sure the context of the error is not lost
+ res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+ return res
+}
+
+// validateDefaultValueSchemaAgainstSchema recursively validates the default
+// value of a schema and of all its nested schemas (items, additionalItems,
+// properties, patternProperties, additionalProperties, allOf).
+//
+// Returns nil (not an empty Result) when the path was already visited —
+// callers merge the result, so Merge must tolerate nil here.
+func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result {
+ if schema == nil || d.isVisited(path) {
+ // Avoids recursing if we are already done with that check
+ return nil
+ }
+ d.beingVisited(path)
+ res := pools.poolOfResults.BorrowResult()
+ s := d.SpecValidator
+
+ if schema.Default != nil {
+ res.Merge(
+ newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default),
+ )
+ }
+ if schema.Items != nil {
+ if schema.Items.Schema != nil {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".items.default", in, schema.Items.Schema))
+ }
+ // Multiple schemas in items
+ if schema.Items.Schemas != nil { // Safeguard
+ for i, sch := range schema.Items.Schemas {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].default", path, i), in, &sch)) //#nosec
+ }
+ }
+ }
+ if _, err := compileRegexp(schema.Pattern); err != nil {
+ res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern))
+ }
+ if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
+ // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well)
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
+ }
+ for propName, prop := range schema.Properties {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
+ }
+ for propName, prop := range schema.PatternProperties {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
+ }
+ if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
+ }
+ if schema.AllOf != nil {
+ for i, aoSch := range schema.AllOf {
+ res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec
+ }
+ }
+ return res
+}
+
+// TODO: Temporary duplicated code. Need to refactor with examples
+
+// validateDefaultValueItemsAgainstSchema validates the default value of an
+// inline (simple-schema) Items definition, recursing into nested Items and
+// checking that its pattern, if any, compiles.
+func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
+ res := pools.poolOfResults.BorrowResult()
+ s := d.SpecValidator
+ if items != nil {
+ if items.Default != nil {
+ res.Merge(
+ newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default),
+ )
+ }
+ if items.Items != nil {
+ res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items))
+ }
+ if _, err := compileRegexp(items.Pattern); err != nil {
+ res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go
new file mode 100644
index 00000000..d2b901ea
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/doc.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package validate provides methods to validate a swagger specification,
+as well as tools to validate data against their schema.
+
+This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference
+can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md.
+
+# Validating a specification
+
+Validates a spec document (from JSON or YAML) against the JSON schema for swagger,
+then checks a number of extra rules that can't be expressed in JSON schema.
+
+Entry points:
+ - Spec()
+ - NewSpecValidator()
+ - SpecValidator.Validate()
+
+Reported as errors:
+
+ [x] definition can't declare a property that's already defined by one of its ancestors
+ [x] definition's ancestor can't be a descendant of the same model
+ [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be relaxed by disabling StrictPathParamUniqueness.
+ [x] each security reference should contain only unique scopes
+ [x] each security scope in a security definition should be unique
+ [x] parameters in path must be unique
+ [x] each path parameter must correspond to a parameter placeholder and vice versa
+ [x] each referenceable definition must have references
+ [x] each definition property listed in the required array must be defined in the properties of the model
+ [x] each parameter should have a unique `name` and `type` combination
+ [x] each operation should have only 1 parameter of type body
+ [x] each reference must point to a valid object
+ [x] every default value that is specified must validate against the schema for that property
+ [x] items property is required for all schemas/definitions of type `array`
+ [x] path parameters must be declared as required
+ [x] headers must not contain $ref
+ [x] schema and property examples provided must validate against their respective object's schema
+ [x] examples provided must validate their schema
+
+Reported as warnings:
+
+ [x] path parameters should not contain any of [{,},\w]
+ [x] empty path
+ [x] unused definitions
+ [x] unsupported validation of examples on non-JSON media types
+ [x] examples in response without schema
+ [x] readOnly properties should not be required
+
+# Validating a schema
+
+The schema validation toolkit validates data against JSON-schema-draft 04 schema.
+
+It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite),
+except for the optional part (bignum, ECMA regexp, ...).
+
+It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...)
+
+Entry points:
+ - AgainstSchema()
+ - ...
+
+# Known limitations
+
+With the current version of this package, the following aspects of swagger are not yet supported:
+
+ [ ] errors and warnings are not reported with key/line number in spec
+ [ ] default values and examples on responses only support application/json producer type
+ [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values
+ [ ] rules for collectionFormat are not implemented
+ [ ] no validation rule for polymorphism support (discriminator) [not done here]
+ [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid
+ [ ] arbitrary large numbers are not supported: max is math.MaxFloat64
+*/
+package validate
diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go
new file mode 100644
index 00000000..d0895697
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/example_validator.go
@@ -0,0 +1,299 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/spec"
+)
+
+// exampleValidator validates example values defined in a spec
+type exampleValidator struct {
+ SpecValidator *SpecValidator // parent spec validator: provides the spec, analyzer and known formats
+ visitedSchemas map[string]struct{} // guards recursive exploration against schema cycles
+ schemaOptions *SchemaValidatorOptions // options forwarded to nested schema/param/header/items validators
+}
+
+// resetVisited resets the internal state of visited schemas,
+// allocating the map lazily on first use.
+func (ex *exampleValidator) resetVisited() {
+ if ex.visitedSchemas == nil {
+ ex.visitedSchemas = make(map[string]struct{})
+
+ return
+ }
+
+ // TODO(go1.21): clear(ex.visitedSchemas)
+ for k := range ex.visitedSchemas {
+ delete(ex.visitedSchemas, k)
+ }
+}
+
+// beingVisited marks a schema path as currently being visited.
+func (ex *exampleValidator) beingVisited(path string) {
+ ex.visitedSchemas[path] = struct{}{}
+}
+
+// isVisited tells if a path has already been visited
+// (including overlapping paths — see the package-level isVisited).
+func (ex *exampleValidator) isVisited(path string) bool {
+ return isVisited(path, ex.visitedSchemas)
+}
+
+// Validate validates the example values declared in the swagger spec
+// Example values MUST conform to their schema.
+//
+// With Swagger 2.0, examples are supported in:
+// - schemas
+// - individual property
+// - responses
+//
+// The returned Result is borrowed from the package pool; it is redeemed
+// when the caller merges it.
+func (ex *exampleValidator) Validate() *Result {
+ errs := pools.poolOfResults.BorrowResult()
+
+ if ex == nil || ex.SpecValidator == nil {
+ // nothing to validate: return the empty (pooled) result
+ return errs
+ }
+ ex.resetVisited()
+ errs.Merge(ex.validateExampleValueValidAgainstSchema()) // error -
+
+ return errs
+}
+
+// validateExampleValueValidAgainstSchema walks every operation (parameters
+// and responses) and every definition, checking that each declared example
+// value validates against its schema or inline (schema-less) definition.
+// Example issues are reported as warnings, not errors.
+func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
+ // every example value that is specified must validate against the schema for that property
+ // in: schemas, properties, object, items
+ // not in: headers, parameters without schema
+
+ res := pools.poolOfResults.BorrowResult()
+ s := ex.SpecValidator
+
+ for method, pathItem := range s.expandedAnalyzer().Operations() {
+ for path, op := range pathItem {
+ // parameters
+ for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+
+ // As of swagger 2.0, Examples are not supported in simple parameters
+ // However, it looks like it is supported by go-openapi
+
+ // reset explored schemas to get depth-first recursive-proof exploration
+ ex.resetVisited()
+
+ // Check simple parameters first
+ // default values provided must validate against their inline definition (no explicit schema)
+ if param.Example != nil && param.Schema == nil {
+ // check param default value is valid
+ red := newParamValidator(¶m, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
+ res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ // clean result: return it to the pool since it won't be merged
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ // Recursively follows Items and Schemas
+ if param.Items != nil {
+ red := ex.validateExampleValueItemsAgainstSchema(param.Name, param.In, ¶m, param.Items) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In))
+ // NOTE(review): Merge (not MergeAsWarnings) here, unlike the simple-parameter branch above — confirm against upstream intent
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ if param.Schema != nil {
+ // Validate example value against schema
+ red := ex.validateExampleValueSchemaAgainstSchema(param.Name, param.In, param.Schema)
+ if red.HasErrorsOrWarnings() {
+ res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+ }
+
+ if op.Responses != nil {
+ if op.Responses.Default != nil {
+ // Same constraint on default Response
+ res.Merge(ex.validateExampleInResponse(op.Responses.Default, jsonDefault, path, 0, op.ID))
+ }
+ // Same constraint on regular Responses
+ if op.Responses.StatusCodeResponses != nil { // Safeguard
+ for code, r := range op.Responses.StatusCodeResponses {
+ res.Merge(ex.validateExampleInResponse(&r, "response", path, code, op.ID)) //#nosec
+ }
+ }
+ } else if op.ID != "" {
+ // Empty op.ID means there is no meaningful operation: no need to report a specific message
+ res.AddErrors(noValidResponseMsg(op.ID))
+ }
+ }
+ }
+ if s.spec.Spec().Definitions != nil { // Safeguard
+ // reset explored schemas to get depth-first recursive-proof exploration
+ ex.resetVisited()
+ for nm, sch := range s.spec.Spec().Definitions {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
+ }
+ }
+ return res
+}
+
+// validateExampleInResponse checks examples declared in a single response:
+// header examples (inline definition and items), the response schema's
+// example, and the response-level "examples" map (application/json only).
+// The response $ref, if any, is expanded first.
+func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, responseType, path string, responseCode int, operationID string) *Result {
+ s := ex.SpecValidator
+
+ response, res := responseHelp.expandResponseRef(resp, path, s)
+ if !res.IsValid() { // Safeguard
+ return res
+ }
+
+ responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode)
+
+ if response.Headers != nil { // Safeguard
+ for nm, h := range response.Headers {
+ // reset explored schemas to get depth-first recursive-proof exploration
+ ex.resetVisited()
+
+ if h.Example != nil {
+ red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
+ res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ // Headers have inline definition, like params
+ if h.Items != nil {
+ red := ex.validateExampleValueItemsAgainstSchema(nm, "header", &h, h.Items) //#nosec
+ if red.HasErrorsOrWarnings() {
+ res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
+ res.MergeAsWarnings(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ if _, err := compileRegexp(h.Pattern); err != nil {
+ res.AddErrors(invalidPatternInHeaderMsg(operationID, nm, responseName, h.Pattern, err))
+ }
+
+ // Headers don't have schema
+ }
+ }
+ if response.Schema != nil {
+ // reset explored schemas to get depth-first recursive-proof exploration
+ ex.resetVisited()
+
+ red := ex.validateExampleValueSchemaAgainstSchema(responseCodeAsStr, "response", response.Schema)
+ if red.HasErrorsOrWarnings() {
+ // Additional message to make sure the context of the error is not lost
+ res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName))
+ res.Merge(red)
+ } else if red.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(red)
+ }
+ }
+
+ if response.Examples != nil {
+ if response.Schema != nil {
+ if example, ok := response.Examples["application/json"]; ok {
+ // NOTE(review): uses s.schemaOptions here, not ex.schemaOptions as elsewhere in this file — confirm intended
+ res.MergeAsWarnings(
+ newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example),
+ )
+ } else {
+ // TODO: validate other media types too
+ res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName))
+ }
+ } else {
+ res.AddWarnings(examplesWithoutSchemaMsg(operationID, responseName))
+ }
+ }
+ return res
+}
+
+// validateExampleValueSchemaAgainstSchema recursively validates the example
+// value of a schema and of all its nested schemas (items, additionalItems,
+// properties, patternProperties, additionalProperties, allOf).
+//
+// Returns nil (not an empty Result) when the path was already visited —
+// callers merge the result, so Merge must tolerate nil here.
+func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in string, schema *spec.Schema) *Result {
+ if schema == nil || ex.isVisited(path) {
+ // Avoids recursing if we are already done with that check
+ return nil
+ }
+ ex.beingVisited(path)
+ s := ex.SpecValidator
+ res := pools.poolOfResults.BorrowResult()
+
+ if schema.Example != nil {
+ res.MergeAsWarnings(
+ newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example),
+ )
+ }
+ if schema.Items != nil {
+ if schema.Items.Schema != nil {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".items.example", in, schema.Items.Schema))
+ }
+ // Multiple schemas in items
+ if schema.Items.Schemas != nil { // Safeguard
+ for i, sch := range schema.Items.Schemas {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.items[%d].example", path, i), in, &sch)) //#nosec
+ }
+ }
+ }
+ if _, err := compileRegexp(schema.Pattern); err != nil {
+ res.AddErrors(invalidPatternInMsg(path, in, schema.Pattern))
+ }
+ if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
+ // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well)
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
+ }
+ for propName, prop := range schema.Properties {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
+ }
+ for propName, prop := range schema.PatternProperties {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
+ }
+ if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
+ }
+ if schema.AllOf != nil {
+ for i, aoSch := range schema.AllOf {
+ res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.allOf[%d]", path, i), in, &aoSch)) //#nosec
+ }
+ }
+ return res
+}
+
+// TODO: Temporary duplicated code. Need to refactor with the default values validator (default_validator.go)
+//
+
+// validateExampleValueItemsAgainstSchema validates the example value of an
+// inline (simple-schema) Items definition, recursing into nested Items and
+// checking that its pattern, if any, compiles.
+func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
+ res := pools.poolOfResults.BorrowResult()
+ s := ex.SpecValidator
+ if items != nil {
+ if items.Example != nil {
+ res.MergeAsWarnings(
+ newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example),
+ )
+ }
+ if items.Items != nil {
+ res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items))
+ }
+ if _, err := compileRegexp(items.Pattern); err != nil {
+ res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
+ }
+ }
+
+ return res
+}
diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go
new file mode 100644
index 00000000..f4e35521
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/formats.go
@@ -0,0 +1,99 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "reflect"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// formatValidator validates string values against a named format
+// registered in a strfmt registry (e.g. date, uuid, email).
+type formatValidator struct {
+ Path string // location of the value being validated, for error reporting
+ In string // where the value sits (body, query, ...), for error reporting
+ Format string // name of the format to validate against
+ KnownFormats strfmt.Registry // registry of known formats
+ Options *SchemaValidatorOptions // pooling/recycling behavior
+}
+
+// newFormatValidator builds a formatValidator. When opts.recycleValidators
+// is set, the instance is borrowed from the package pool and is redeemed
+// after Validate is called.
+func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var f *formatValidator
+ if opts.recycleValidators {
+ f = pools.poolOfFormatValidators.BorrowValidator()
+ } else {
+ f = new(formatValidator)
+ }
+
+ f.Path = path
+ f.In = in
+ f.Format = format
+ f.KnownFormats = formats
+ f.Options = opts
+
+ return f
+}
+
+// SetPath updates the path reported in validation errors.
+func (f *formatValidator) SetPath(path string) {
+ f.Path = path
+}
+
+// Applies reports whether this validator handles the given source: it only
+// applies to string values whose source (Items, Parameter, Schema or Header)
+// declares a format known to the registry.
+func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ if source == nil || f.KnownFormats == nil {
+ return false
+ }
+
+ switch source := source.(type) {
+ case *spec.Items:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Parameter:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Schema:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ case *spec.Header:
+ return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
+ default:
+ return false
+ }
+}
+
+// Validate checks val against the configured format.
+//
+// NOTE(review): val.(string) panics on non-string input — callers are
+// expected to gate on Applies (kind == reflect.String) first; confirm.
+func (f *formatValidator) Validate(val interface{}) *Result {
+ if f.Options.recycleValidators {
+ // return this validator to the pool once done
+ defer func() {
+ f.redeem()
+ }()
+ }
+
+ var result *Result
+ if f.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
+ if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil {
+ result.AddErrors(err)
+ }
+
+ return result
+}
+
+// redeem returns this validator instance to the package pool.
+func (f *formatValidator) redeem() {
+ pools.poolOfFormatValidators.RedeemValidator(f)
+}
diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go
new file mode 100644
index 00000000..757e403d
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/helpers.go
@@ -0,0 +1,333 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+// TODO: define this as package validate/internal
+// This must be done while keeping CI intact with all tests and test coverage
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+)
+
+const (
+ swaggerBody = "body"
+ swaggerExample = "example"
+ swaggerExamples = "examples"
+)
+
+const (
+ objectType = "object"
+ arrayType = "array"
+ stringType = "string"
+ integerType = "integer"
+ numberType = "number"
+ booleanType = "boolean"
+ fileType = "file"
+ nullType = "null"
+)
+
+const (
+ jsonProperties = "properties"
+ jsonItems = "items"
+ jsonType = "type"
+ // jsonSchema = "schema"
+ jsonDefault = "default"
+)
+
+const (
+ stringFormatDate = "date"
+ stringFormatDateTime = "date-time"
+ stringFormatPassword = "password"
+ stringFormatByte = "byte"
+ // stringFormatBinary = "binary"
+ stringFormatCreditCard = "creditcard"
+ stringFormatDuration = "duration"
+ stringFormatEmail = "email"
+ stringFormatHexColor = "hexcolor"
+ stringFormatHostname = "hostname"
+ stringFormatIPv4 = "ipv4"
+ stringFormatIPv6 = "ipv6"
+ stringFormatISBN = "isbn"
+ stringFormatISBN10 = "isbn10"
+ stringFormatISBN13 = "isbn13"
+ stringFormatMAC = "mac"
+ stringFormatBSONObjectID = "bsonobjectid"
+ stringFormatRGBColor = "rgbcolor"
+ stringFormatSSN = "ssn"
+ stringFormatURI = "uri"
+ stringFormatUUID = "uuid"
+ stringFormatUUID3 = "uuid3"
+ stringFormatUUID4 = "uuid4"
+ stringFormatUUID5 = "uuid5"
+
+ integerFormatInt32 = "int32"
+ integerFormatInt64 = "int64"
+ integerFormatUInt32 = "uint32"
+ integerFormatUInt64 = "uint64"
+
+ numberFormatFloat32 = "float32"
+ numberFormatFloat64 = "float64"
+ numberFormatFloat = "float"
+ numberFormatDouble = "double"
+)
+
+// Helpers available at the package level
+var (
+ pathHelp *pathHelper
+ valueHelp *valueHelper
+ errorHelp *errorHelper
+ paramHelp *paramHelper
+ responseHelp *responseHelper
+)
+
+type errorHelper struct {
+ // A collection of unexported helpers for error construction
+}
+
+func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result {
+ // Builds a Result from standard errors.Error
+ var result *Result
+ if recycle {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+ result.Errors = []error{err}
+
+ return result
+}
+
+func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
+ // Provides more context on error messages
+ // reported by the jsoinpointer package by altering the passed Result
+ if err != nil {
+ res.AddErrors(cannotResolveRefMsg(fromPath, ref, err))
+ }
+ return res
+}
+
+type pathHelper struct {
+ // A collection of unexported helpers for path validation
+}
+
+func (h *pathHelper) stripParametersInPath(path string) string {
+ // Returns a path stripped from all path parameters, with multiple or trailing slashes removed.
+ //
+ // Stripping is performed on a slash-separated basis, e.g '/a{/b}' remains a{/b} and not /a.
+ // - Trailing "/" make a difference, e.g. /a/ !~ /a (ex: canary/bitbucket.org/swagger.json)
+ // - presence or absence of a parameter makes a difference, e.g. /a/{log} !~ /a/ (ex: canary/kubernetes/swagger.json)
+
+ // Regexp to extract parameters from path, with surrounding {}.
+ // NOTE: important non-greedy modifier
+ rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+ strippedSegments := []string{}
+
+ for _, segment := range strings.Split(path, "/") {
+ strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X"))
+ }
+ return strings.Join(strippedSegments, "/")
+}
+
+func (h *pathHelper) extractPathParams(path string) (params []string) {
+ // Extracts all params from a path, with surrounding "{}"
+ rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`)
+
+ for _, segment := range strings.Split(path, "/") {
+ for _, v := range rexParsePathParam.FindAllStringSubmatch(segment, -1) {
+ params = append(params, v...)
+ }
+ }
+ return
+}
+
+type valueHelper struct {
+ // A collection of unexported helpers for value validation
+}
+
+func (h *valueHelper) asInt64(val interface{}) int64 {
+ // Number conversion function for int64, without error checking
+ // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val)
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int64(v.Uint())
+ case reflect.Float32, reflect.Float64:
+ return int64(v.Float())
+ default:
+ // panic("Non numeric value in asInt64()")
+ return 0
+ }
+}
+
+func (h *valueHelper) asUint64(val interface{}) uint64 {
+ // Number conversion function for uint64, without error checking
+ // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val)
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return uint64(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return v.Uint()
+ case reflect.Float32, reflect.Float64:
+ return uint64(v.Float())
+ default:
+ // panic("Non numeric value in asUint64()")
+ return 0
+ }
+}
+
+// Same for unsigned floats
+func (h *valueHelper) asFloat64(val interface{}) float64 {
+ // Number conversion function for float64, without error checking
+ // (implements an implicit type upgrade).
+ v := reflect.ValueOf(val)
+ switch v.Kind() { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(v.Uint())
+ case reflect.Float32, reflect.Float64:
+ return v.Float()
+ default:
+ // panic("Non numeric value in asFloat64()")
+ return 0
+ }
+}
+
+type paramHelper struct {
+ // A collection of unexported helpers for parameters resolution
+}
+
+func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) {
+ operation, ok := s.expandedAnalyzer().OperationFor(method, path)
+ if ok {
+ // expand parameters first if necessary
+ resolvedParams := []spec.Parameter{}
+ for _, ppr := range operation.Parameters {
+ resolvedParam, red := h.resolveParam(path, method, operationID, &ppr, s) //#nosec
+ res.Merge(red)
+ if resolvedParam != nil {
+ resolvedParams = append(resolvedParams, *resolvedParam)
+ }
+ }
+ // remove params with invalid expansion from Slice
+ operation.Parameters = resolvedParams
+
+ for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path,
+ func(_ spec.Parameter, err error) bool {
+ // since params have already been expanded, there are few causes for error
+ res.AddErrors(someParametersBrokenMsg(path, method, operationID))
+ // original error from analyzer
+ res.AddErrors(err)
+ return true
+ }) {
+ params = append(params, ppr)
+ }
+ }
+ return
+}
+
+func (h *paramHelper) resolveParam(path, method, operationID string, param *spec.Parameter, s *SpecValidator) (*spec.Parameter, *Result) {
+ // Ensure parameter is expanded
+ var err error
+ res := new(Result)
+ isRef := param.Ref.String() != ""
+ if s.spec.SpecFilePath() == "" {
+ err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil)
+ } else {
+ err = spec.ExpandParameter(param, s.spec.SpecFilePath())
+
+ }
+ if err != nil { // Safeguard
+ // NOTE: we may enter here when the whole parameter is an unresolved $ref
+ refPath := strings.Join([]string{"\"" + path + "\"", method}, ".")
+ errorHelp.addPointerError(res, err, param.Ref.String(), refPath)
+ return nil, res
+ }
+ res.Merge(h.checkExpandedParam(param, param.Name, param.In, operationID, isRef))
+ return param, res
+}
+
+func (h *paramHelper) checkExpandedParam(pr *spec.Parameter, path, in, operation string, isRef bool) *Result {
+ // Secure parameter structure after $ref resolution
+ res := new(Result)
+ simpleZero := spec.SimpleSchema{}
+ // Try to explain why... best guess
+ switch {
+ case pr.In == swaggerBody && (pr.SimpleSchema != simpleZero && pr.SimpleSchema.Type != objectType):
+ if isRef {
+ // Most likely, a $ref with a sibling is an unwanted situation: in itself this is a warning...
+ // but we detect it because of the following error:
+ // schema took over Parameter for an unexplained reason
+ res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation))
+ }
+ res.AddErrors(invalidParameterDefinitionMsg(path, in, operation))
+ case pr.In != swaggerBody && pr.Schema != nil:
+ if isRef {
+ res.AddWarnings(refShouldNotHaveSiblingsMsg(path, operation))
+ }
+ res.AddErrors(invalidParameterDefinitionAsSchemaMsg(path, in, operation))
+ case (pr.In == swaggerBody && pr.Schema == nil) || (pr.In != swaggerBody && pr.SimpleSchema == simpleZero):
+ // Other unexpected mishaps
+ res.AddErrors(invalidParameterDefinitionMsg(path, in, operation))
+ }
+ return res
+}
+
+type responseHelper struct {
+ // A collection of unexported helpers for response resolution
+}
+
+func (r *responseHelper) expandResponseRef(
+ response *spec.Response,
+ path string, s *SpecValidator) (*spec.Response, *Result) {
+ // Ensure response is expanded
+ var err error
+ res := new(Result)
+ if s.spec.SpecFilePath() == "" {
+ // there is no physical document to resolve $ref in response
+ err = spec.ExpandResponseWithRoot(response, s.spec.Spec(), nil)
+ } else {
+ err = spec.ExpandResponse(response, s.spec.SpecFilePath())
+ }
+ if err != nil { // Safeguard
+ // NOTE: we may enter here when the whole response is an unresolved $ref.
+ errorHelp.addPointerError(res, err, response.Ref.String(), path)
+ return nil, res
+ }
+
+ return response, res
+}
+
+func (r *responseHelper) responseMsgVariants(
+ responseType string,
+ responseCode int) (responseName, responseCodeAsStr string) {
+ // Path variants for messages
+ if responseType == jsonDefault {
+ responseCodeAsStr = jsonDefault
+ responseName = "default response"
+ } else {
+ responseCodeAsStr = strconv.Itoa(responseCode)
+ responseName = "response " + responseCodeAsStr
+ }
+ return
+}
diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go
new file mode 100644
index 00000000..dff73fa9
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/object_validator.go
@@ -0,0 +1,431 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+type objectValidator struct {
+ Path string
+ In string
+ MaxProperties *int64
+ MinProperties *int64
+ Required []string
+ Properties map[string]spec.Schema
+ AdditionalProperties *spec.SchemaOrBool
+ PatternProperties map[string]spec.Schema
+ Root interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+ splitPath []string
+}
+
+func newObjectValidator(path, in string,
+ maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties,
+ additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *objectValidator
+ if opts.recycleValidators {
+ v = pools.poolOfObjectValidators.BorrowValidator()
+ } else {
+ v = new(objectValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxProperties = maxProperties
+ v.MinProperties = minProperties
+ v.Required = required
+ v.Properties = properties
+ v.AdditionalProperties = additionalProperties
+ v.PatternProperties = patternProperties
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+ v.splitPath = strings.Split(v.Path, ".")
+
+ return v
+}
+
+func (o *objectValidator) SetPath(path string) {
+ o.Path = path
+ o.splitPath = strings.Split(path, ".")
+}
+
+func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ // TODO: this should also work for structs
+ // there is a problem in the type validator where it will be unhappy about null values
+ // so that requires more testing
+ _, isSchema := source.(*spec.Schema)
+ return isSchema && (kind == reflect.Map || kind == reflect.Struct)
+}
+
+func (o *objectValidator) isProperties() bool {
+ p := o.splitPath
+ return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties
+}
+
+func (o *objectValidator) isDefault() bool {
+ p := o.splitPath
+ return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault
+}
+
+func (o *objectValidator) isExample() bool {
+ p := o.splitPath
+ return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample
+}
+
+func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) {
+ // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly.
+ // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type).
+ if val == nil {
+ return
+ }
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ return
+ }
+
+ tpe, isString := t.(string)
+ if !isString || tpe != arrayType {
+ return
+ }
+
+ item, itemsKeyFound := val[jsonItems]
+ if itemsKeyFound {
+ return
+ }
+
+ res.AddErrors(errors.Required(jsonItems, o.Path, item))
+}
+
+func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) {
+ if val == nil {
+ return
+ }
+
+ if o.isProperties() || o.isDefault() || o.isExample() {
+ return
+ }
+
+ _, itemsKeyFound := val[jsonItems]
+ if !itemsKeyFound {
+ return
+ }
+
+ t, typeFound := val[jsonType]
+ if !typeFound {
+ // there is no type
+ res.AddErrors(errors.Required(jsonType, o.Path, t))
+ }
+
+ if tpe, isString := t.(string); !isString || tpe != arrayType {
+ res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
+ }
+}
+
+func (o *objectValidator) precheck(res *Result, val map[string]interface{}) {
+ if o.Options.EnableArrayMustHaveItemsCheck {
+ o.checkArrayMustHaveItems(res, val)
+ }
+ if o.Options.EnableObjectArrayTypeCheck {
+ o.checkItemsMustBeTypeArray(res, val)
+ }
+}
+
+func (o *objectValidator) Validate(data interface{}) *Result {
+ if o.Options.recycleValidators {
+ defer func() {
+ o.redeem()
+ }()
+ }
+
+ var val map[string]interface{}
+ if data != nil {
+ var ok bool
+ val, ok = data.(map[string]interface{})
+ if !ok {
+ return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult)
+ }
+ }
+ numKeys := int64(len(val))
+
+ if o.MinProperties != nil && numKeys < *o.MinProperties {
+ return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult)
+ }
+ if o.MaxProperties != nil && numKeys > *o.MaxProperties {
+ return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult)
+ }
+
+ var res *Result
+ if o.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
+
+ o.precheck(res, val)
+
+ // check validity of field names
+ if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows {
+ // Case: additionalProperties: false
+ o.validateNoAdditionalProperties(val, res)
+ } else {
+ // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> }
+ o.validateAdditionalProperties(val, res)
+ }
+
+ o.validatePropertiesSchema(val, res)
+
+ // Check patternProperties
+ // TODO: it looks like we have done that twice in many cases
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well
+ if regularProperty || !matched {
+ continue
+ }
+
+ for _, pName := range patterns {
+ if v, ok := o.PatternProperties[pName]; ok {
+ r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(data.(map[string]interface{}), key, r)
+ }
+ }
+ }
+
+ return res
+}
+
+func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) {
+ for k := range val {
+ if k == "$schema" || k == "id" {
+ // special properties "$schema" and "id" are ignored
+ continue
+ }
+
+ _, regularProperty := o.Properties[k]
+ if regularProperty {
+ continue
+ }
+
+ matched := false
+ for pk := range o.PatternProperties {
+ re, err := compileRegexp(pk)
+ if err != nil {
+ continue
+ }
+ if matches := re.MatchString(k); matches {
+ matched = true
+ break
+ }
+ }
+ if matched {
+ continue
+ }
+
+ res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
+
+ // BUG(fredbi): This section should move to a part dedicated to spec validation as
+ // it will conflict with regular schemas where a property "headers" is defined.
+
+ //
+ // Croaks a more explicit message on top of the standard one
+ // on some recognized cases.
+ //
+ // NOTE: edge cases with invalid type assertion are simply ignored here.
+ // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
+ // by higher level callers (the IMPORTANT! tag will be eventually
+ // removed).
+ if k != "headers" || val[k] == nil {
+ continue
+ }
+
+ // $ref is forbidden in header
+ headers, mapOk := val[k].(map[string]interface{})
+ if !mapOk {
+ continue
+ }
+
+ for headerKey, headerBody := range headers {
+ if headerBody == nil {
+ continue
+ }
+
+ headerSchema, mapOfMapOk := headerBody.(map[string]interface{})
+ if !mapOfMapOk {
+ continue
+ }
+
+ _, found := headerSchema["$ref"]
+ if !found {
+ continue
+ }
+
+ refString, stringOk := headerSchema["$ref"].(string)
+ if !stringOk {
+ continue
+ }
+
+ msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
+ res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
+ /*
+ case "$ref":
+ if val[k] != nil {
+ // TODO: check context of that ref: warn about siblings, check against invalid context
+ }
+ */
+ }
+ }
+}
+
+func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) {
+ for key, value := range val {
+ _, regularProperty := o.Properties[key]
+ if regularProperty {
+ continue
+ }
+
+ // Validates property against "patternProperties" if applicable
+ // BUG(fredbi): succeededOnce is always false
+
+ // NOTE: how about regular properties which do not match patternProperties?
+ matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
+ if matched || succeededOnce {
+ continue
+ }
+
+ if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil {
+ continue
+ }
+
+ // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
+ // AdditionalProperties as Schema
+ r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
+ res.mergeForField(val, key, r)
+ }
+ // Valid cases: additionalProperties: true or undefined
+}
+
+func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) {
+ createdFromDefaults := map[string]struct{}{}
+
+ // Property types:
+ // - regular Property
+ pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(pSchema)
+ }()
+
+ for pName := range o.Properties {
+ *pSchema = o.Properties[pName]
+ var rName string
+ if o.Path == "" {
+ rName = pName
+ } else {
+ rName = o.Path + "." + pName
+ }
+
+ // Recursively validates each property against its schema
+ v, ok := val[pName]
+ if ok {
+ r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v)
+ res.mergeForField(val, pName, r)
+
+ continue
+ }
+
+ if pSchema.Default != nil {
+ // if a default value is defined, creates the property from defaults
+ // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
+ createdFromDefaults[pName] = struct{}{}
+ if !o.Options.skipSchemataResult {
+ res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer
+ }
+ }
+ }
+
+ if len(o.Required) == 0 {
+ return
+ }
+
+ // Check required properties
+ for _, k := range o.Required {
+ v, ok := val[k]
+ if ok {
+ continue
+ }
+ _, isCreatedFromDefaults := createdFromDefaults[k]
+ if isCreatedFromDefaults {
+ continue
+ }
+
+ res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v))
+ }
+}
+
+// TODO: succeededOnce is not used anywhere
+func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) {
+ if len(o.PatternProperties) == 0 {
+ return false, false, nil
+ }
+
+ matched := false
+ succeededOnce := false
+ patterns := make([]string, 0, len(o.PatternProperties))
+
+ schema := pools.poolOfSchemas.BorrowSchema()
+ defer func() {
+ pools.poolOfSchemas.RedeemSchema(schema)
+ }()
+
+ for k := range o.PatternProperties {
+ re, err := compileRegexp(k)
+ if err != nil {
+ continue
+ }
+
+ match := re.MatchString(key)
+ if !match {
+ continue
+ }
+
+ *schema = o.PatternProperties[k]
+ patterns = append(patterns, k)
+ matched = true
+ validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options)
+
+ res := validator.Validate(value)
+ result.Merge(res)
+ }
+
+ return matched, succeededOnce, patterns
+}
+
+func (o *objectValidator) redeem() {
+ pools.poolOfObjectValidators.RedeemValidator(o)
+}
diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go
new file mode 100644
index 00000000..cfe9b066
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/options.go
@@ -0,0 +1,62 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import "sync"
+
+// Opts specifies validation options for a SpecValidator.
+//
+// NOTE: other options might be needed, for example a go-swagger specific mode.
+type Opts struct {
+ ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid
+
+ // StrictPathParamUniqueness enables a strict validation of paths that include
+ // path parameters. When true, it will enforce that for each method, the path
+ // is unique, regardless of path parameters such that GET:/petstore/{id} and
+ // GET:/petstore/{pet} anre considered duplicate paths.
+ //
+ // Consider disabling if path parameters can include slashes such as
+ // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and
+ // /"shelve/*/book/*" respectively.
+ StrictPathParamUniqueness bool
+ SkipSchemataResult bool
+}
+
+var (
+ defaultOpts = Opts{
+ // default is to stop validation on errors
+ ContinueOnErrors: false,
+
+ // StrictPathParamUniqueness is defaulted to true. This maintains existing
+ // behavior.
+ StrictPathParamUniqueness: true,
+ }
+
+ defaultOptsMutex = &sync.Mutex{}
+)
+
+// SetContinueOnErrors sets global default behavior regarding spec validation errors reporting.
+//
+// For extended error reporting, you most likely want to set it to true.
+// For faster validation, it's better to give up early when a spec is detected as invalid: set it to false (this is the default).
+//
+// Setting this mode does NOT affect the validation status.
+//
+// NOTE: this method affects global defaults. It is not suitable for a concurrent usage.
+func SetContinueOnErrors(c bool) {
+ defer defaultOptsMutex.Unlock()
+ defaultOptsMutex.Lock()
+ defaultOpts.ContinueOnErrors = c
+}
diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go
new file mode 100644
index 00000000..3ddce4dc
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/pools.go
@@ -0,0 +1,366 @@
+//go:build !validatedebug
+
+package validate
+
+import (
+ "sync"
+
+ "github.com/go-openapi/spec"
+)
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ },
+ }
+}
+
+type (
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ }
+)
+
+func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ return p.Get().(*SchemaValidator)
+}
+
+func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.Put(s)
+}
+
+func (p objectValidatorsPool) BorrowValidator() *objectValidator {
+ return p.Get().(*objectValidator)
+}
+
+func (p objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.Put(s)
+}
+
+func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ return p.Get().(*schemaSliceValidator)
+}
+
+func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.Put(s)
+}
+
+func (p itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ return p.Get().(*itemsValidator)
+}
+
+func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.Put(s)
+}
+
+func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ return p.Get().(*basicCommonValidator)
+}
+
+func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.Put(s)
+}
+
+func (p headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ return p.Get().(*HeaderValidator)
+}
+
+func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.Put(s)
+}
+
+func (p paramValidatorsPool) BorrowValidator() *ParamValidator {
+ return p.Get().(*ParamValidator)
+}
+
+func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.Put(s)
+}
+
+func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ return p.Get().(*basicSliceValidator)
+}
+
+func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.Put(s)
+}
+
+func (p numberValidatorsPool) BorrowValidator() *numberValidator {
+ return p.Get().(*numberValidator)
+}
+
+func (p numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.Put(s)
+}
+
+func (p stringValidatorsPool) BorrowValidator() *stringValidator {
+ return p.Get().(*stringValidator)
+}
+
+func (p stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.Put(s)
+}
+
+func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ return p.Get().(*schemaPropsValidator)
+}
+
+func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.Put(s)
+}
+
+func (p formatValidatorsPool) BorrowValidator() *formatValidator {
+ return p.Get().(*formatValidator)
+}
+
+func (p formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.Put(s)
+}
+
+func (p typeValidatorsPool) BorrowValidator() *typeValidator {
+ return p.Get().(*typeValidator)
+}
+
+func (p typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.Put(s)
+}
+
+func (p schemasPool) BorrowSchema() *spec.Schema {
+ return p.Get().(*spec.Schema)
+}
+
+func (p schemasPool) RedeemSchema(s *spec.Schema) {
+ p.Put(s)
+}
+
+func (p resultsPool) BorrowResult() *Result {
+ return p.Get().(*Result).cleared()
+}
+
+func (p resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ return
+ }
+ p.Put(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go
new file mode 100644
index 00000000..12949f02
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/pools_debug.go
@@ -0,0 +1,1012 @@
+//go:build validatedebug
+
+package validate
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "testing"
+
+ "github.com/go-openapi/spec"
+)
+
+// This version of the pools is to be used for debugging and testing, with build tag "validatedebug".
+//
+// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can
+// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected.
+
+var pools allPools
+
+func init() {
+ resetPools()
+}
+
+func resetPools() {
+ // NOTE: for testing purpose, we might want to reset pools after calling Validate twice.
+ // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
+ // and further calls to Get are mishandled.
+
+ pools = allPools{
+ poolOfSchemaValidators: schemaValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &SchemaValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*SchemaValidator]status),
+ allocMap: make(map[*SchemaValidator]string),
+ redeemMap: make(map[*SchemaValidator]string),
+ },
+ poolOfObjectValidators: objectValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &objectValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*objectValidator]status),
+ allocMap: make(map[*objectValidator]string),
+ redeemMap: make(map[*objectValidator]string),
+ },
+ poolOfSliceValidators: sliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaSliceValidator]status),
+ allocMap: make(map[*schemaSliceValidator]string),
+ redeemMap: make(map[*schemaSliceValidator]string),
+ },
+ poolOfItemsValidators: itemsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &itemsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*itemsValidator]status),
+ allocMap: make(map[*itemsValidator]string),
+ redeemMap: make(map[*itemsValidator]string),
+ },
+ poolOfBasicCommonValidators: basicCommonValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicCommonValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicCommonValidator]status),
+ allocMap: make(map[*basicCommonValidator]string),
+ redeemMap: make(map[*basicCommonValidator]string),
+ },
+ poolOfHeaderValidators: headerValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &HeaderValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*HeaderValidator]status),
+ allocMap: make(map[*HeaderValidator]string),
+ redeemMap: make(map[*HeaderValidator]string),
+ },
+ poolOfParamValidators: paramValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &ParamValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*ParamValidator]status),
+ allocMap: make(map[*ParamValidator]string),
+ redeemMap: make(map[*ParamValidator]string),
+ },
+ poolOfBasicSliceValidators: basicSliceValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &basicSliceValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*basicSliceValidator]status),
+ allocMap: make(map[*basicSliceValidator]string),
+ redeemMap: make(map[*basicSliceValidator]string),
+ },
+ poolOfNumberValidators: numberValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &numberValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*numberValidator]status),
+ allocMap: make(map[*numberValidator]string),
+ redeemMap: make(map[*numberValidator]string),
+ },
+ poolOfStringValidators: stringValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &stringValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*stringValidator]status),
+ allocMap: make(map[*stringValidator]string),
+ redeemMap: make(map[*stringValidator]string),
+ },
+ poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &schemaPropsValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*schemaPropsValidator]status),
+ allocMap: make(map[*schemaPropsValidator]string),
+ redeemMap: make(map[*schemaPropsValidator]string),
+ },
+ poolOfFormatValidators: formatValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &formatValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*formatValidator]status),
+ allocMap: make(map[*formatValidator]string),
+ redeemMap: make(map[*formatValidator]string),
+ },
+ poolOfTypeValidators: typeValidatorsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &typeValidator{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*typeValidator]status),
+ allocMap: make(map[*typeValidator]string),
+ redeemMap: make(map[*typeValidator]string),
+ },
+ poolOfSchemas: schemasPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &spec.Schema{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*spec.Schema]status),
+ allocMap: make(map[*spec.Schema]string),
+ redeemMap: make(map[*spec.Schema]string),
+ },
+ poolOfResults: resultsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := &Result{}
+
+ return s
+ },
+ },
+ debugMap: make(map[*Result]status),
+ allocMap: make(map[*Result]string),
+ redeemMap: make(map[*Result]string),
+ },
+ }
+}
+
+const (
+ statusFresh status = iota + 1
+ statusRecycled
+ statusRedeemed
+)
+
+func (s status) String() string {
+ switch s {
+ case statusFresh:
+ return "fresh"
+ case statusRecycled:
+ return "recycled"
+ case statusRedeemed:
+ return "redeemed"
+ default:
+ panic(fmt.Errorf("invalid status: %d", s))
+ }
+}
+
+type (
+ // Debug
+ status uint8
+
+ allPools struct {
+ // memory pools for all validator objects.
+ //
+ // Each pool can be borrowed from and redeemed to.
+ poolOfSchemaValidators schemaValidatorsPool
+ poolOfObjectValidators objectValidatorsPool
+ poolOfSliceValidators sliceValidatorsPool
+ poolOfItemsValidators itemsValidatorsPool
+ poolOfBasicCommonValidators basicCommonValidatorsPool
+ poolOfHeaderValidators headerValidatorsPool
+ poolOfParamValidators paramValidatorsPool
+ poolOfBasicSliceValidators basicSliceValidatorsPool
+ poolOfNumberValidators numberValidatorsPool
+ poolOfStringValidators stringValidatorsPool
+ poolOfSchemaPropsValidators schemaPropsValidatorsPool
+ poolOfFormatValidators formatValidatorsPool
+ poolOfTypeValidators typeValidatorsPool
+ poolOfSchemas schemasPool
+ poolOfResults resultsPool
+ }
+
+ schemaValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*SchemaValidator]status
+ allocMap map[*SchemaValidator]string
+ redeemMap map[*SchemaValidator]string
+ mx sync.Mutex
+ }
+
+ objectValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*objectValidator]status
+ allocMap map[*objectValidator]string
+ redeemMap map[*objectValidator]string
+ mx sync.Mutex
+ }
+
+ sliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaSliceValidator]status
+ allocMap map[*schemaSliceValidator]string
+ redeemMap map[*schemaSliceValidator]string
+ mx sync.Mutex
+ }
+
+ itemsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*itemsValidator]status
+ allocMap map[*itemsValidator]string
+ redeemMap map[*itemsValidator]string
+ mx sync.Mutex
+ }
+
+ basicCommonValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicCommonValidator]status
+ allocMap map[*basicCommonValidator]string
+ redeemMap map[*basicCommonValidator]string
+ mx sync.Mutex
+ }
+
+ headerValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*HeaderValidator]status
+ allocMap map[*HeaderValidator]string
+ redeemMap map[*HeaderValidator]string
+ mx sync.Mutex
+ }
+
+ paramValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*ParamValidator]status
+ allocMap map[*ParamValidator]string
+ redeemMap map[*ParamValidator]string
+ mx sync.Mutex
+ }
+
+ basicSliceValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*basicSliceValidator]status
+ allocMap map[*basicSliceValidator]string
+ redeemMap map[*basicSliceValidator]string
+ mx sync.Mutex
+ }
+
+ numberValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*numberValidator]status
+ allocMap map[*numberValidator]string
+ redeemMap map[*numberValidator]string
+ mx sync.Mutex
+ }
+
+ stringValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*stringValidator]status
+ allocMap map[*stringValidator]string
+ redeemMap map[*stringValidator]string
+ mx sync.Mutex
+ }
+
+ schemaPropsValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*schemaPropsValidator]status
+ allocMap map[*schemaPropsValidator]string
+ redeemMap map[*schemaPropsValidator]string
+ mx sync.Mutex
+ }
+
+ formatValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*formatValidator]status
+ allocMap map[*formatValidator]string
+ redeemMap map[*formatValidator]string
+ mx sync.Mutex
+ }
+
+ typeValidatorsPool struct {
+ *sync.Pool
+ debugMap map[*typeValidator]status
+ allocMap map[*typeValidator]string
+ redeemMap map[*typeValidator]string
+ mx sync.Mutex
+ }
+
+ schemasPool struct {
+ *sync.Pool
+ debugMap map[*spec.Schema]status
+ allocMap map[*spec.Schema]string
+ redeemMap map[*spec.Schema]string
+ mx sync.Mutex
+ }
+
+ resultsPool struct {
+ *sync.Pool
+ debugMap map[*Result]status
+ allocMap map[*Result]string
+ redeemMap map[*Result]string
+ mx sync.Mutex
+ }
+)
+
+func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator {
+ s := p.Get().(*SchemaValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
+ // NOTE: s might be nil. In that case, Put is a noop.
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *objectValidatorsPool) BorrowValidator() *objectValidator {
+ s := p.Get().(*objectValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled object should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed object should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed object should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
+ s := p.Get().(*schemaSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled schemaSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator {
+ s := p.Get().(*itemsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled itemsValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed itemsValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
+ s := p.Get().(*basicCommonValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicCommonValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicCommonValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator {
+ s := p.Get().(*HeaderValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled HeaderValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed header should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed header should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *paramValidatorsPool) BorrowValidator() *ParamValidator {
+ s := p.Get().(*ParamValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed param should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed param should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
+ s := p.Get().(*basicSliceValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled basicSliceValidator should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed basicSliceValidator should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *numberValidatorsPool) BorrowValidator() *numberValidator {
+ s := p.Get().(*numberValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled number should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed number should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed number should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *stringValidatorsPool) BorrowValidator() *stringValidator {
+ s := p.Get().(*stringValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled string should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed string should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed string should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
+ s := p.Get().(*schemaPropsValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled param should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed schemaProps should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *formatValidatorsPool) BorrowValidator() *formatValidator {
+ s := p.Get().(*formatValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled format should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed format should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed format should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *typeValidatorsPool) BorrowValidator() *typeValidator {
+ s := p.Get().(*typeValidator)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled type should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed type should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s]))
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *schemasPool) BorrowSchema() *spec.Schema {
+ s := p.Get().(*spec.Schema)
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled spec.Schema should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *schemasPool) RedeemSchema(s *spec.Schema) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed spec.Schema should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *resultsPool) BorrowResult() *Result {
+ s := p.Get().(*Result).cleared()
+
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ p.debugMap[s] = statusFresh
+ } else {
+ if x != statusRedeemed {
+ panic("recycled result should have been redeemed")
+ }
+ p.debugMap[s] = statusRecycled
+ }
+ p.allocMap[s] = caller()
+
+ return s
+}
+
+func (p *resultsPool) RedeemResult(s *Result) {
+ if s == emptyResult {
+ if len(s.Errors) > 0 || len(s.Warnings) > 0 {
+ panic("empty result should not mutate")
+ }
+ return
+ }
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ x, ok := p.debugMap[s]
+ if !ok {
+ panic("redeemed Result should have been allocated")
+ }
+ if x != statusRecycled && x != statusFresh {
+ panic("redeemed Result should have been allocated from a fresh or recycled pointer")
+ }
+ p.debugMap[s] = statusRedeemed
+ p.redeemMap[s] = caller()
+ p.Put(s)
+}
+
+func (p *allPools) allIsRedeemed(t testing.TB) bool {
+ outcome := true
+ for k, v := range p.poolOfSchemaValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfObjectValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("sliceValidator should be redeemed. Allocated by: %s", p.poolOfSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfItemsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicCommonValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfHeaderValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfParamValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfBasicSliceValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfNumberValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfStringValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemaPropsValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfFormatValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfTypeValidators.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfSchemas.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k])
+ outcome = false
+ }
+ for k, v := range p.poolOfResults.debugMap {
+ if v == statusRedeemed {
+ continue
+ }
+ t.Logf("result should be redeemed. Allocated by: %s", p.poolOfResults.allocMap[k])
+ outcome = false
+ }
+
+ return outcome
+}
+
+func caller() string {
+ pc, _, _, _ := runtime.Caller(3) //nolint:dogsled
+ from, line := runtime.FuncForPC(pc).FileLine(pc)
+
+ return fmt.Sprintf("%s:%d", from, line)
+}
diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go
new file mode 100644
index 00000000..c80804a9
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/result.go
@@ -0,0 +1,563 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ stderrors "errors"
+ "reflect"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+)
+
+var emptyResult = &Result{MatchCount: 1}
+
+// Result represents a validation result set, composed of
+// errors and warnings.
+//
+// It is used to keep track of all detected errors and warnings during
+// the validation of a specification.
+//
+// Matchcount is used to determine
+// which errors are relevant in the case of AnyOf, OneOf
+// schema validation. Results from the validation branch
+// with most matches get eventually selected.
+//
+// TODO: keep path of key originating the error
+type Result struct {
+ Errors []error
+ Warnings []error
+ MatchCount int
+
+ // the object data
+ data interface{}
+
+ // Schemata for the root object
+ rootObjectSchemata schemata
+ // Schemata for object fields
+ fieldSchemata []fieldSchemata
+ // Schemata for slice items
+ itemSchemata []itemSchemata
+
+ cachedFieldSchemata map[FieldKey][]*spec.Schema
+ cachedItemSchemata map[ItemKey][]*spec.Schema
+
+ wantsRedeemOnMerge bool
+}
+
+// FieldKey is a pair of an object and a field, usable as a key for a map.
+type FieldKey struct {
+ object reflect.Value // actually a map[string]interface{}, but the latter cannot be a key
+ field string
+}
+
+// ItemKey is a pair of a slice and an index, usable as a key for a map.
+type ItemKey struct {
+ slice reflect.Value // actually a []interface{}, but the latter cannot be a key
+ index int
+}
+
+// NewFieldKey returns a pair of an object and field usable as a key of a map.
+func NewFieldKey(obj map[string]interface{}, field string) FieldKey {
+ return FieldKey{object: reflect.ValueOf(obj), field: field}
+}
+
+// Object returns the underlying object of this key.
+func (fk *FieldKey) Object() map[string]interface{} {
+ return fk.object.Interface().(map[string]interface{})
+}
+
+// Field returns the underlying field of this key.
+func (fk *FieldKey) Field() string {
+ return fk.field
+}
+
+// NewItemKey returns a pair of a slice and index usable as a key of a map.
+func NewItemKey(slice interface{}, i int) ItemKey {
+ return ItemKey{slice: reflect.ValueOf(slice), index: i}
+}
+
+// Slice returns the underlying slice of this key.
+func (ik *ItemKey) Slice() []interface{} {
+ return ik.slice.Interface().([]interface{})
+}
+
+// Index returns the underlying index of this key.
+func (ik *ItemKey) Index() int {
+ return ik.index
+}
+
+type fieldSchemata struct {
+ obj map[string]interface{}
+ field string
+ schemata schemata
+}
+
+type itemSchemata struct {
+ slice reflect.Value
+ index int
+ schemata schemata
+}
+
+// Merge merges this result with the other one(s), preserving match counts etc.
+func (r *Result) Merge(others ...*Result) *Result {
+ for _, other := range others {
+ if other == nil {
+ continue
+ }
+ r.mergeWithoutRootSchemata(other)
+ r.rootObjectSchemata.Append(other.rootObjectSchemata)
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+ }
+ return r
+}
+
+// Data returns the original data object used for validation. Mutating this renders
+// the result invalid.
+func (r *Result) Data() interface{} {
+ return r.data
+}
+
+// RootObjectSchemata returns the schemata which apply to the root object.
+func (r *Result) RootObjectSchemata() []*spec.Schema {
+ return r.rootObjectSchemata.Slice()
+}
+
+// FieldSchemata returns the schemata which apply to fields in objects.
+func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
+ if r.cachedFieldSchemata != nil {
+ return r.cachedFieldSchemata
+ }
+
+ ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata))
+ for _, fs := range r.fieldSchemata {
+ key := NewFieldKey(fs.obj, fs.field)
+ if fs.schemata.one != nil {
+ ret[key] = append(ret[key], fs.schemata.one)
+ } else if len(fs.schemata.multiple) > 0 {
+ ret[key] = append(ret[key], fs.schemata.multiple...)
+ }
+ }
+ r.cachedFieldSchemata = ret
+
+ return ret
+}
+
+// ItemSchemata returns the schemata which apply to items in slices.
+func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
+ if r.cachedItemSchemata != nil {
+ return r.cachedItemSchemata
+ }
+
+ ret := make(map[ItemKey][]*spec.Schema, len(r.itemSchemata))
+ for _, ss := range r.itemSchemata {
+ key := NewItemKey(ss.slice, ss.index)
+ if ss.schemata.one != nil {
+ ret[key] = append(ret[key], ss.schemata.one)
+ } else if len(ss.schemata.multiple) > 0 {
+ ret[key] = append(ret[key], ss.schemata.multiple...)
+ }
+ }
+ r.cachedItemSchemata = ret
+ return ret
+}
+
+func (r *Result) resetCaches() {
+ r.cachedFieldSchemata = nil
+ r.cachedItemSchemata = nil
+}
+
+// mergeForField merges other into r, assigning other's root schemata to the given Object and field name.
+//
+//nolint:unparam
+func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result {
+ if other == nil {
+ return r
+ }
+ r.mergeWithoutRootSchemata(other)
+
+ if other.rootObjectSchemata.Len() > 0 {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = make([]fieldSchemata, len(obj))
+ }
+ // clone other schemata, as other is about to be redeemed to the pool
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{
+ obj: obj,
+ field: field,
+ schemata: other.rootObjectSchemata.Clone(),
+ })
+ }
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+
+ return r
+}
+
+// mergeForSlice merges other into r, assigning other's root schemata to the given slice and index.
+//
+//nolint:unparam
+func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result {
+ if other == nil {
+ return r
+ }
+ r.mergeWithoutRootSchemata(other)
+
+ if other.rootObjectSchemata.Len() > 0 {
+ if r.itemSchemata == nil {
+ r.itemSchemata = make([]itemSchemata, slice.Len())
+ }
+ // clone other schemata, as other is about to be redeemed to the pool
+ r.itemSchemata = append(r.itemSchemata, itemSchemata{
+ slice: slice,
+ index: i,
+ schemata: other.rootObjectSchemata.Clone(),
+ })
+ }
+
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+
+ return r
+}
+
+// addRootObjectSchemata adds the given schemata for the root object of the result.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
+func (r *Result) addRootObjectSchemata(s *spec.Schema) {
+ clone := *s
+ r.rootObjectSchemata.Append(schemata{one: &clone})
+}
+
+// addPropertySchemata adds the given schemata for the object and field.
+//
+// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
+func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
+ }
+ clone := *schema
+ r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}})
+}
+
+/*
+// addSliceSchemata adds the given schemata for the slice and index.
+// The slice schemata might be reused. I.e. do not modify it after being added to a result.
+func (r *Result) addSliceSchemata(slice reflect.Value, i int, schema *spec.Schema) {
+ if r.itemSchemata == nil {
+ r.itemSchemata = make([]itemSchemata, 0, slice.Len())
+ }
+ r.itemSchemata = append(r.itemSchemata, itemSchemata{slice: slice, index: i, schemata: schemata{one: schema}})
+}
+*/
+
+// mergeWithoutRootSchemata merges other into r, ignoring the rootObject schemata.
+func (r *Result) mergeWithoutRootSchemata(other *Result) {
+ r.resetCaches()
+ r.AddErrors(other.Errors...)
+ r.AddWarnings(other.Warnings...)
+ r.MatchCount += other.MatchCount
+
+ if other.fieldSchemata != nil {
+ if r.fieldSchemata == nil {
+ r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata))
+ }
+ for _, field := range other.fieldSchemata {
+ field.schemata = field.schemata.Clone()
+ r.fieldSchemata = append(r.fieldSchemata, field)
+ }
+ }
+
+ if other.itemSchemata != nil {
+ if r.itemSchemata == nil {
+ r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata))
+ }
+ for _, field := range other.itemSchemata {
+ field.schemata = field.schemata.Clone()
+ r.itemSchemata = append(r.itemSchemata, field)
+ }
+ }
+}
+
+// MergeAsErrors merges this result with the other one(s), preserving match counts etc.
+//
+// Warnings from input are merged as Errors in the returned merged Result.
+func (r *Result) MergeAsErrors(others ...*Result) *Result {
+ for _, other := range others {
+ if other != nil {
+ r.resetCaches()
+ r.AddErrors(other.Errors...)
+ r.AddErrors(other.Warnings...)
+ r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+ }
+ }
+ return r
+}
+
+// MergeAsWarnings merges this result with the other one(s), preserving match counts etc.
+//
+// Errors from input are merged as Warnings in the returned merged Result.
+func (r *Result) MergeAsWarnings(others ...*Result) *Result {
+ for _, other := range others {
+ if other != nil {
+ r.resetCaches()
+ r.AddWarnings(other.Errors...)
+ r.AddWarnings(other.Warnings...)
+ r.MatchCount += other.MatchCount
+ if other.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(other)
+ }
+ }
+ }
+ return r
+}
+
+// AddErrors adds errors to this validation result (if not already reported).
+//
+// Since the same check may be passed several times while exploring the
+// spec structure (via $ref, ...) reported messages are kept
+// unique.
+func (r *Result) AddErrors(errors ...error) {
+ for _, e := range errors {
+ found := false
+ if e != nil {
+ for _, isReported := range r.Errors {
+ if e.Error() == isReported.Error() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.Errors = append(r.Errors, e)
+ }
+ }
+ }
+}
+
+// AddWarnings adds warnings to this validation result (if not already reported).
+func (r *Result) AddWarnings(warnings ...error) {
+ for _, e := range warnings {
+ found := false
+ if e != nil {
+ for _, isReported := range r.Warnings {
+ if e.Error() == isReported.Error() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.Warnings = append(r.Warnings, e)
+ }
+ }
+ }
+}
+
+func (r *Result) keepRelevantErrors() *Result {
+ // TODO: this one is going to disappear...
+ // keepRelevantErrors strips a result from standard errors and keeps
+ // the ones which are supposedly more accurate.
+ //
+ // The original result remains unaffected (creates a new instance of Result).
+ // This method is used to work around the "matchCount" filter which would otherwise
+ // strip our result from some accurate error reporting from lower level validators.
+ //
+ // NOTE: this implementation with a placeholder (IMPORTANT!) is neither clean nor
+ // very efficient. On the other hand, relying on go-openapi/errors to manipulate
+ // codes would require to change a lot here. So, for the moment, let's go with
+ // placeholders.
+ strippedErrors := []error{}
+ for _, e := range r.Errors {
+ if strings.HasPrefix(e.Error(), "IMPORTANT!") {
+ strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ }
+ }
+ strippedWarnings := []error{}
+ for _, e := range r.Warnings {
+ if strings.HasPrefix(e.Error(), "IMPORTANT!") {
+ strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
+ }
+ }
+ var strippedResult *Result
+ if r.wantsRedeemOnMerge {
+ strippedResult = pools.poolOfResults.BorrowResult()
+ } else {
+ strippedResult = new(Result)
+ }
+ strippedResult.Errors = strippedErrors
+ strippedResult.Warnings = strippedWarnings
+ return strippedResult
+}
+
+// IsValid returns true when this result is valid.
+//
+// Returns true on a nil *Result.
+func (r *Result) IsValid() bool {
+ if r == nil {
+ return true
+ }
+ return len(r.Errors) == 0
+}
+
+// HasErrors returns true when this result is invalid.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasErrors() bool {
+ if r == nil {
+ return false
+ }
+ return !r.IsValid()
+}
+
+// HasWarnings returns true when this result contains warnings.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasWarnings() bool {
+ if r == nil {
+ return false
+ }
+ return len(r.Warnings) > 0
+}
+
+// HasErrorsOrWarnings returns true when this result contains
+// either errors or warnings.
+//
+// Returns false on a nil *Result.
+func (r *Result) HasErrorsOrWarnings() bool {
+ if r == nil {
+ return false
+ }
+ return len(r.Errors) > 0 || len(r.Warnings) > 0
+}
+
+// Inc increments the match count
+func (r *Result) Inc() {
+ r.MatchCount++
+}
+
+// AsError renders this result as an error interface
+//
+// TODO: reporting / pretty print with path ordered and indented
+func (r *Result) AsError() error {
+ if r.IsValid() {
+ return nil
+ }
+ return errors.CompositeValidationError(r.Errors...)
+}
+
+func (r *Result) cleared() *Result {
+ // clear the Result to be reusable. Keep allocated capacity.
+ r.Errors = r.Errors[:0]
+ r.Warnings = r.Warnings[:0]
+ r.MatchCount = 0
+ r.data = nil
+ r.rootObjectSchemata.one = nil
+ r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0]
+ r.fieldSchemata = r.fieldSchemata[:0]
+ r.itemSchemata = r.itemSchemata[:0]
+ for k := range r.cachedFieldSchemata {
+ delete(r.cachedFieldSchemata, k)
+ }
+ for k := range r.cachedItemSchemata {
+ delete(r.cachedItemSchemata, k)
+ }
+ r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another
+
+ return r
+}
+
+// schemata is an arbitrary number of schemata. It does a distinction between zero,
+// one and many schemata to avoid slice allocations.
+type schemata struct {
+ // one is set if there is exactly one schema. In that case multiple must be nil.
+ one *spec.Schema
+ // multiple is an arbitrary number of schemas. If it is set, one must be nil.
+ multiple []*spec.Schema
+}
+
+func (s *schemata) Len() int {
+ if s.one != nil {
+ return 1
+ }
+ return len(s.multiple)
+}
+
+func (s *schemata) Slice() []*spec.Schema {
+ if s == nil {
+ return nil
+ }
+ if s.one != nil {
+ return []*spec.Schema{s.one}
+ }
+ return s.multiple
+}
+
+// Append appends the schemata in other to s. It mutates s in-place.
+func (s *schemata) Append(other schemata) {
+ if other.one == nil && len(other.multiple) == 0 {
+ return
+ }
+ if s.one == nil && len(s.multiple) == 0 {
+ *s = other
+ return
+ }
+
+ if s.one != nil {
+ if other.one != nil {
+ s.multiple = []*spec.Schema{s.one, other.one}
+ } else {
+ t := make([]*spec.Schema, 0, 1+len(other.multiple))
+ s.multiple = append(append(t, s.one), other.multiple...)
+ }
+ s.one = nil
+ } else {
+ if other.one != nil {
+ s.multiple = append(s.multiple, other.one)
+ } else {
+ if cap(s.multiple) >= len(s.multiple)+len(other.multiple) {
+ s.multiple = append(s.multiple, other.multiple...)
+ } else {
+ t := make([]*spec.Schema, 0, len(s.multiple)+len(other.multiple))
+ s.multiple = append(append(t, s.multiple...), other.multiple...)
+ }
+ }
+ }
+}
+
+func (s schemata) Clone() schemata {
+ var clone schemata
+
+ if s.one != nil {
+ clone.one = new(spec.Schema)
+ *clone.one = *s.one
+ }
+
+ if len(s.multiple) > 0 {
+ clone.multiple = make([]*spec.Schema, len(s.multiple))
+ for idx := 0; idx < len(s.multiple); idx++ {
+ sp := new(spec.Schema)
+ *sp = *s.multiple[idx]
+ clone.multiple[idx] = sp
+ }
+ }
+
+ return clone
+}
diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go
new file mode 100644
index 00000000..76de03e1
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/rexp.go
@@ -0,0 +1,71 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ re "regexp"
+ "sync"
+ "sync/atomic"
+)
+
+// Cache for compiled regular expressions
+var (
+ cacheMutex = &sync.Mutex{}
+ reDict = atomic.Value{} // map[string]*re.Regexp
+)
+
+func compileRegexp(pattern string) (*re.Regexp, error) {
+ if cache, ok := reDict.Load().(map[string]*re.Regexp); ok {
+ if r := cache[pattern]; r != nil {
+ return r, nil
+ }
+ }
+
+ r, err := re.Compile(pattern)
+ if err != nil {
+ return nil, err
+ }
+ cacheRegexp(r)
+ return r, nil
+}
+
+func mustCompileRegexp(pattern string) *re.Regexp {
+ if cache, ok := reDict.Load().(map[string]*re.Regexp); ok {
+ if r := cache[pattern]; r != nil {
+ return r
+ }
+ }
+
+ r := re.MustCompile(pattern)
+ cacheRegexp(r)
+ return r
+}
+
+func cacheRegexp(r *re.Regexp) {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
+ if cache, ok := reDict.Load().(map[string]*re.Regexp); !ok || cache[r.String()] == nil {
+ newCache := map[string]*re.Regexp{
+ r.String(): r,
+ }
+
+ for k, v := range cache {
+ newCache[k] = v
+ }
+
+ reDict.Store(newCache)
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go
new file mode 100644
index 00000000..db65264f
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema.go
@@ -0,0 +1,354 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "encoding/json"
+ "reflect"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// SchemaValidator validates data against a JSON schema
+type SchemaValidator struct {
+ Path string
+ in string
+ Schema *spec.Schema
+ validators [8]valueValidator
+ Root interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats.
+//
+// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example.
+func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error {
+ res := NewSchemaValidator(schema, nil, "", formats,
+ append(options, WithRecycleValidators(true), withRecycleResults(true))...,
+ ).Validate(data)
+ defer func() {
+ pools.poolOfResults.RedeemResult(res)
+ }()
+
+ if res.HasErrors() {
+ return errors.CompositeValidationError(res.Errors...)
+ }
+
+ return nil
+}
+
+// NewSchemaValidator creates a new schema validator.
+//
+// Panics if the provided schema is invalid.
+func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newSchemaValidator(schema, rootSchema, root, formats, opts)
+}
+
+func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator {
+ if schema == nil {
+ return nil
+ }
+
+ if rootSchema == nil {
+ rootSchema = schema
+ }
+
+ if schema.ID != "" || schema.Ref.String() != "" || schema.Ref.IsRoot() {
+ err := spec.ExpandSchema(schema, rootSchema, nil)
+ if err != nil {
+ msg := invalidSchemaProvidedMsg(err).Error()
+ panic(msg)
+ }
+ }
+
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *SchemaValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaValidators.BorrowValidator()
+ } else {
+ s = new(SchemaValidator)
+ }
+
+ s.Path = root
+ s.in = "body"
+ s.Schema = schema
+ s.Root = rootSchema
+ s.Options = opts
+ s.KnownFormats = formats
+
+ s.validators = [8]valueValidator{
+ s.typeValidator(),
+ s.schemaPropsValidator(),
+ s.stringValidator(),
+ s.formatValidator(),
+ s.numberValidator(),
+ s.sliceValidator(),
+ s.commonValidator(),
+ s.objectValidator(),
+ }
+
+ return s
+}
+
+// SetPath sets the path for this schema validator
+func (s *SchemaValidator) SetPath(path string) {
+ s.Path = path
+}
+
+// Applies returns true when this schema validator applies
+func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool {
+ _, ok := source.(*spec.Schema)
+ return ok
+}
+
+// Validate validates the data against the schema
+func (s *SchemaValidator) Validate(data interface{}) *Result {
+ if s == nil {
+ return emptyResult
+ }
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem() // one-time use validator
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ result.data = data
+ } else {
+ result = &Result{data: data}
+ }
+
+ if s.Schema != nil && !s.Options.skipSchemataResult {
+ result.addRootObjectSchemata(s.Schema)
+ }
+
+ if data == nil {
+ // early exit with minimal validation
+ result.Merge(s.validators[0].Validate(data)) // type validator
+ result.Merge(s.validators[6].Validate(data)) // common validator
+
+ if s.Options.recycleValidators {
+ s.validators[0] = nil
+ s.validators[6] = nil
+ }
+
+ return result
+ }
+
+ tpe := reflect.TypeOf(data)
+ kind := tpe.Kind()
+ for kind == reflect.Ptr {
+ tpe = tpe.Elem()
+ kind = tpe.Kind()
+ }
+ d := data
+
+ if kind == reflect.Struct {
+ // NOTE: since reflect retrieves the true nature of types
+ // this means that all strfmt types passed here (e.g. strfmt.Datetime, etc..)
+ // are converted here to strings, and structs are systematically converted
+ // to map[string]interface{}.
+ d = swag.ToDynamicJSON(data)
+ }
+
+ // TODO: this part should be handed over to type validator
+ // Handle special case of json.Number data (number marshalled as string)
+ isnumber := s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType)
+ if num, ok := data.(json.Number); ok && isnumber {
+ if s.Schema.Type.Contains(integerType) { // avoid lossy conversion
+ in, erri := num.Int64()
+ if erri != nil {
+ result.AddErrors(invalidTypeConversionMsg(s.Path, erri))
+ result.Inc()
+
+ return result
+ }
+ d = in
+ } else {
+ nf, errf := num.Float64()
+ if errf != nil {
+ result.AddErrors(invalidTypeConversionMsg(s.Path, errf))
+ result.Inc()
+
+ return result
+ }
+ d = nf
+ }
+
+ tpe = reflect.TypeOf(d)
+ kind = tpe.Kind()
+ }
+
+ for idx, v := range s.validators {
+ if !v.Applies(s.Schema, kind) {
+ if s.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := v.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
+ continue
+ }
+
+ result.Merge(v.Validate(d))
+ if s.Options.recycleValidators {
+ s.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ result.Inc()
+ }
+ result.Inc()
+
+ return result
+}
+
+func (s *SchemaValidator) typeValidator() valueValidator {
+ return newTypeValidator(
+ s.Path,
+ s.in,
+ s.Schema.Type,
+ s.Schema.Nullable,
+ s.Schema.Format,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) commonValidator() valueValidator {
+ return newBasicCommonValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.Enum,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) sliceValidator() valueValidator {
+ return newSliceValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxItems,
+ s.Schema.MinItems,
+ s.Schema.UniqueItems,
+ s.Schema.AdditionalItems,
+ s.Schema.Items,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) numberValidator() valueValidator {
+ return newNumberValidator(
+ s.Path,
+ s.in,
+ s.Schema.Default,
+ s.Schema.MultipleOf,
+ s.Schema.Maximum,
+ s.Schema.ExclusiveMaximum,
+ s.Schema.Minimum,
+ s.Schema.ExclusiveMinimum,
+ "",
+ "",
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) stringValidator() valueValidator {
+ return newStringValidator(
+ s.Path,
+ s.in,
+ nil,
+ false,
+ false,
+ s.Schema.MaxLength,
+ s.Schema.MinLength,
+ s.Schema.Pattern,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) formatValidator() valueValidator {
+ return newFormatValidator(
+ s.Path,
+ s.in,
+ s.Schema.Format,
+ s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) schemaPropsValidator() valueValidator {
+ sch := s.Schema
+ return newSchemaPropsValidator(
+ s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) objectValidator() valueValidator {
+ return newObjectValidator(
+ s.Path,
+ s.in,
+ s.Schema.MaxProperties,
+ s.Schema.MinProperties,
+ s.Schema.Required,
+ s.Schema.Properties,
+ s.Schema.AdditionalProperties,
+ s.Schema.PatternProperties,
+ s.Root,
+ s.KnownFormats,
+ s.Options,
+ )
+}
+
+func (s *SchemaValidator) redeem() {
+ pools.poolOfSchemaValidators.RedeemValidator(s)
+}
+
+func (s *SchemaValidator) redeemChildren() {
+ for i, validator := range s.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ s.validators[i] = nil // free up allocated children if not in pool
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go
new file mode 100644
index 00000000..786e2e35
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema_messages.go
@@ -0,0 +1,78 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "github.com/go-openapi/errors"
+)
+
+// Error messages related to schema validation and returned as results.
+const (
+ // ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided.
+ //
+ // TODO: should move to package go-openapi/errors
+ ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items"
+
+ // HasDependencyError indicates that a dependencies construct was not verified
+ HasDependencyError = "%q has a dependency on %s"
+
+ // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled
+ InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v"
+
+ // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on
+ InvalidTypeConversionError = "invalid type conversion in %s: %v "
+
+ // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified
+ MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)"
+
+ // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were
+ MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). %s"
+
+ // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified
+ //
+ // TODO: punctuation in message
+ MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s"
+
+ // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified
+ MustNotValidateSchemaError = "%q must not validate the schema (not)"
+)
+
+// Warning messages related to schema validation and returned as results
+const ()
+
+func invalidSchemaProvidedMsg(err error) errors.Error {
+ return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err)
+}
+func invalidTypeConversionMsg(path string, err error) errors.Error {
+ return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err)
+}
+func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg)
+}
+func mustValidateAtLeastOneSchemaMsg(path string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path)
+}
+func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg)
+}
+func mustNotValidatechemaMsg(path string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path)
+}
+func hasADependencyMsg(path, depkey string) errors.Error {
+ return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey)
+}
+func arrayDoesNotAllowAdditionalItemsMsg() errors.Error {
+ return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError)
+}
diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go
new file mode 100644
index 00000000..65eeebea
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema_option.go
@@ -0,0 +1,83 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+// SchemaValidatorOptions defines optional rules for schema validation
+type SchemaValidatorOptions struct {
+ EnableObjectArrayTypeCheck bool
+ EnableArrayMustHaveItemsCheck bool
+ recycleValidators bool
+ recycleResult bool
+ skipSchemataResult bool
+}
+
+// Option sets optional rules for schema validation
+type Option func(*SchemaValidatorOptions)
+
+// EnableObjectArrayTypeCheck activates the swagger rule: a schema that defines items must declare type: array
+func EnableObjectArrayTypeCheck(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.EnableObjectArrayTypeCheck = enable
+ }
+}
+
+// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined
+func EnableArrayMustHaveItemsCheck(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.EnableArrayMustHaveItemsCheck = enable
+ }
+}
+
+// SwaggerSchema activates swagger schema validation rules
+func SwaggerSchema(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.EnableObjectArrayTypeCheck = enable
+ svo.EnableArrayMustHaveItemsCheck = enable
+ }
+}
+
+// WithRecycleValidators saves memory allocations and makes validators
+// available for a single use of Validate() only.
+//
+// When a validator is recycled, the caller MUST NOT call the Validate() method twice.
+func WithRecycleValidators(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleValidators = enable
+ }
+}
+
+func withRecycleResults(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.recycleResult = enable
+ }
+}
+
+// WithSkipSchemataResult skips the deep audit payload stored in validation Result
+func WithSkipSchemataResult(enable bool) Option {
+ return func(svo *SchemaValidatorOptions) {
+ svo.skipSchemataResult = enable
+ }
+}
+
+// Options returns the current set of options
+func (svo SchemaValidatorOptions) Options() []Option {
+ return []Option{
+ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck),
+ EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck),
+ WithRecycleValidators(svo.recycleValidators),
+ withRecycleResults(svo.recycleResult),
+ WithSkipSchemataResult(svo.skipSchemataResult),
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go
new file mode 100644
index 00000000..1ca37924
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/schema_props.go
@@ -0,0 +1,356 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+type schemaPropsValidator struct {
+ Path string
+ In string
+ AllOf []spec.Schema
+ OneOf []spec.Schema
+ AnyOf []spec.Schema
+ Not *spec.Schema
+ Dependencies spec.Dependencies
+ anyOfValidators []*SchemaValidator
+ allOfValidators []*SchemaValidator
+ oneOfValidators []*SchemaValidator
+ notValidator *SchemaValidator
+ Root interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func (s *schemaPropsValidator) SetPath(path string) {
+ s.Path = path
+}
+
+func newSchemaPropsValidator(
+ path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *schemaPropsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ anyValidators := make([]*SchemaValidator, 0, len(anyOf))
+ for i := range anyOf {
+ anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts))
+ }
+ allValidators := make([]*SchemaValidator, 0, len(allOf))
+ for i := range allOf {
+ allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts))
+ }
+ oneValidators := make([]*SchemaValidator, 0, len(oneOf))
+ for i := range oneOf {
+ oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts))
+ }
+
+ var notValidator *SchemaValidator
+ if not != nil {
+ notValidator = newSchemaValidator(not, root, path, formats, opts)
+ }
+
+ var s *schemaPropsValidator
+ if opts.recycleValidators {
+ s = pools.poolOfSchemaPropsValidators.BorrowValidator()
+ } else {
+ s = new(schemaPropsValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.AllOf = allOf
+ s.OneOf = oneOf
+ s.AnyOf = anyOf
+ s.Not = not
+ s.Dependencies = deps
+ s.anyOfValidators = anyValidators
+ s.allOfValidators = allValidators
+ s.oneOfValidators = oneValidators
+ s.notValidator = notValidator
+ s.Root = root
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
+}
+
+func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool {
+ _, isSchema := source.(*spec.Schema)
+ return isSchema
+}
+
+func (s *schemaPropsValidator) Validate(data interface{}) *Result {
+ var mainResult *Result
+ if s.Options.recycleResult {
+ mainResult = pools.poolOfResults.BorrowResult()
+ } else {
+ mainResult = new(Result)
+ }
+
+ // Intermediary error results
+
+ // IMPORTANT! messages from underlying validators
+ var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result
+
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeemChildren()
+ s.redeem()
+
+ // results are redeemed when merged
+ }()
+ }
+
+ if len(s.anyOfValidators) > 0 {
+ keepResultAnyOf = pools.poolOfResults.BorrowResult()
+ s.validateAnyOf(data, mainResult, keepResultAnyOf)
+ }
+
+ if len(s.oneOfValidators) > 0 {
+ keepResultOneOf = pools.poolOfResults.BorrowResult()
+ s.validateOneOf(data, mainResult, keepResultOneOf)
+ }
+
+ if len(s.allOfValidators) > 0 {
+ keepResultAllOf = pools.poolOfResults.BorrowResult()
+ s.validateAllOf(data, mainResult, keepResultAllOf)
+ }
+
+ if s.notValidator != nil {
+ s.validateNot(data, mainResult)
+ }
+
+ if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
+ s.validateDependencies(data, mainResult)
+ }
+
+ mainResult.Inc()
+
+ // In the end we retain best failures for schema validation
+ // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
+ return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
+}
+
+func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) {
+ // Validates at least one in anyOf schemas
+ var bestFailures *Result
+
+ for i, anyOfSchema := range s.anyOfValidators {
+ result := anyOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.anyOfValidators[i] = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+ if result.IsValid() {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+
+ _ = keepResultAnyOf.cleared()
+ mainResult.Merge(result)
+
+ return
+ }
+
+ // MatchCount is used to select errors from the schema with most positive checks
+ if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ bestFailures = result
+
+ continue
+ }
+
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+ }
+
+ mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
+ mainResult.Merge(bestFailures)
+}
+
+func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) {
+ // Validates exactly one in oneOf schemas
+ var (
+ firstSuccess, bestFailures *Result
+ validated int
+ )
+
+ for i, oneOfSchema := range s.oneOfValidators {
+ result := oneOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.oneOfValidators[i] = nil
+ }
+
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
+
+ if result.IsValid() {
+ validated++
+ _ = keepResultOneOf.cleared()
+
+ if firstSuccess == nil {
+ firstSuccess = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+
+ continue
+ }
+
+ // MatchCount is used to select errors from the schema with most positive checks
+ if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ bestFailures = result
+ } else if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+ }
+
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid"))
+ mainResult.Merge(bestFailures)
+ // firstSucess necessarily nil
+ case 1:
+ mainResult.Merge(firstSuccess)
+ if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(bestFailures)
+ }
+ default:
+ mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated)))
+ mainResult.Merge(bestFailures)
+ if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(firstSuccess)
+ }
+ }
+}
+
+func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) {
+ // Validates all of allOf schemas
+ var validated int
+
+ for i, allOfSchema := range s.allOfValidators {
+ result := allOfSchema.Validate(data)
+ if s.Options.recycleValidators {
+ s.allOfValidators[i] = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ keepResultAllOf.Merge(result.keepRelevantErrors())
+ if result.IsValid() {
+ validated++
+ }
+ mainResult.Merge(result)
+ }
+
+ switch validated {
+ case 0:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated"))
+ case len(s.allOfValidators):
+ default:
+ mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ""))
+ }
+}
+
+func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) {
+ result := s.notValidator.Validate(data)
+ if s.Options.recycleValidators {
+ s.notValidator = nil
+ }
+ // We keep inner IMPORTANT! errors no matter what MatchCount tells us
+ if result.IsValid() {
+ mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
+ }
+ if result.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(result) // this result is ditched
+ }
+}
+
+func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) {
+ val := data.(map[string]interface{})
+ for key := range val {
+ dep, ok := s.Dependencies[key]
+ if !ok {
+ continue
+ }
+
+ if dep.Schema != nil {
+ mainResult.Merge(
+ newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data),
+ )
+ continue
+ }
+
+ if len(dep.Property) > 0 {
+ for _, depKey := range dep.Property {
+ if _, ok := val[depKey]; !ok {
+ mainResult.AddErrors(hasADependencyMsg(s.Path, depKey))
+ }
+ }
+ }
+ }
+}
+
+func (s *schemaPropsValidator) redeem() {
+ pools.poolOfSchemaPropsValidators.RedeemValidator(s)
+}
+
+func (s *schemaPropsValidator) redeemChildren() {
+ for _, v := range s.anyOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.anyOfValidators = nil
+
+ for _, v := range s.allOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.allOfValidators = nil
+
+ for _, v := range s.oneOfValidators {
+ if v == nil {
+ continue
+ }
+ v.redeemChildren()
+ v.redeem()
+ }
+ s.oneOfValidators = nil
+
+ if s.notValidator != nil {
+ s.notValidator.redeemChildren()
+ s.notValidator.redeem()
+ s.notValidator = nil
+ }
+}
diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go
new file mode 100644
index 00000000..13bb0208
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/slice_validator.go
@@ -0,0 +1,150 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+type schemaSliceValidator struct {
+ Path string
+ In string
+ MaxItems *int64
+ MinItems *int64
+ UniqueItems bool
+ AdditionalItems *spec.SchemaOrBool
+ Items *spec.SchemaOrArray
+ Root interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+func newSliceValidator(path, in string,
+ maxItems, minItems *int64, uniqueItems bool,
+ additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray,
+ root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var v *schemaSliceValidator
+ if opts.recycleValidators {
+ v = pools.poolOfSliceValidators.BorrowValidator()
+ } else {
+ v = new(schemaSliceValidator)
+ }
+
+ v.Path = path
+ v.In = in
+ v.MaxItems = maxItems
+ v.MinItems = minItems
+ v.UniqueItems = uniqueItems
+ v.AdditionalItems = additionalItems
+ v.Items = items
+ v.Root = root
+ v.KnownFormats = formats
+ v.Options = opts
+
+ return v
+}
+
+func (s *schemaSliceValidator) SetPath(path string) {
+ s.Path = path
+}
+
+func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ _, ok := source.(*spec.Schema)
+ r := ok && kind == reflect.Slice
+ return r
+}
+
+func (s *schemaSliceValidator) Validate(data interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
+ var result *Result
+ if s.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+ if data == nil {
+ return result
+ }
+ val := reflect.ValueOf(data)
+ size := val.Len()
+
+ if s.Items != nil && s.Items.Schema != nil {
+ for i := 0; i < size; i++ {
+ validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options)
+ validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i))
+ value := val.Index(i)
+ result.mergeForSlice(val, i, validator.Validate(value.Interface()))
+ }
+ }
+
+ itemsSize := 0
+ if s.Items != nil && len(s.Items.Schemas) > 0 {
+ itemsSize = len(s.Items.Schemas)
+ for i := 0; i < itemsSize; i++ {
+ if size <= i {
+ break
+ }
+
+ validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
+ result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
+ }
+ }
+ if s.AdditionalItems != nil && itemsSize < size {
+ if s.Items != nil && len(s.Items.Schemas) > 0 && !s.AdditionalItems.Allows {
+ result.AddErrors(arrayDoesNotAllowAdditionalItemsMsg())
+ }
+ if s.AdditionalItems.Schema != nil {
+ for i := itemsSize; i < size-itemsSize+1; i++ {
+ validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options)
+ result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface()))
+ }
+ }
+ }
+
+ if s.MinItems != nil {
+ if err := MinItems(s.Path, s.In, int64(size), *s.MinItems); err != nil {
+ result.AddErrors(err)
+ }
+ }
+ if s.MaxItems != nil {
+ if err := MaxItems(s.Path, s.In, int64(size), *s.MaxItems); err != nil {
+ result.AddErrors(err)
+ }
+ }
+ if s.UniqueItems {
+ if err := UniqueItems(s.Path, s.In, val.Interface()); err != nil {
+ result.AddErrors(err)
+ }
+ }
+ result.Inc()
+ return result
+}
+
+func (s *schemaSliceValidator) redeem() {
+ pools.poolOfSliceValidators.RedeemValidator(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go
new file mode 100644
index 00000000..96545256
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/spec.go
@@ -0,0 +1,852 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/go-openapi/analysis"
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/jsonpointer"
+ "github.com/go-openapi/loads"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// Spec validates an OpenAPI 2.0 specification document.
+//
+// Returns an error flattening in a single standard error, all validation messages.
+//
+// - TODO: $ref should not have siblings
+// - TODO: make sure documentation reflects all checks and warnings
+// - TODO: check on discriminators
+// - TODO: explicit message on unsupported keywords (better than "forbidden property"...)
+// - TODO: full list of unresolved refs
+// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples
+// - TODO: option to determine if we validate for go-swagger or in a more general context
+// - TODO: check on required properties to support anyOf, allOf, oneOf
+//
+// NOTE: SecurityScopes are maps: no need to check uniqueness
+func Spec(doc *loads.Document, formats strfmt.Registry) error {
+ errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc)
+ if errs.HasErrors() {
+ return errors.CompositeValidationError(errs.Errors...)
+ }
+ return nil
+}
+
+// SpecValidator validates a swagger 2.0 spec
+type SpecValidator struct {
+ schema *spec.Schema // swagger 2.0 schema
+ spec *loads.Document
+ analyzer *analysis.Spec
+ expanded *loads.Document
+ KnownFormats strfmt.Registry
+ Options Opts // validation options
+ schemaOptions *SchemaValidatorOptions
+}
+
+// NewSpecValidator creates a new swagger spec validator instance
+func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator {
+ // schema options that apply to all called validators
+ schemaOptions := new(SchemaValidatorOptions)
+ for _, o := range []Option{
+ SwaggerSchema(true),
+ WithRecycleValidators(true),
+ // withRecycleResults(true),
+ } {
+ o(schemaOptions)
+ }
+
+ return &SpecValidator{
+ schema: schema,
+ KnownFormats: formats,
+ Options: defaultOpts,
+ schemaOptions: schemaOptions,
+ }
+}
+
+// Validate validates the swagger spec
+func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) {
+ s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult
+ var sd *loads.Document
+ errs, warnings := new(Result), new(Result)
+
+ if v, ok := data.(*loads.Document); ok {
+ sd = v
+ }
+ if sd == nil {
+ errs.AddErrors(invalidDocumentMsg())
+ return errs, warnings // no point in continuing
+ }
+ s.spec = sd
+ s.analyzer = analysis.New(sd.Spec())
+
+ // Raw spec unmarshalling errors
+ var obj interface{}
+ if err := json.Unmarshal(sd.Raw(), &obj); err != nil {
+ // NOTE: under normal conditions, the *load.Document has been already unmarshalled
+ // So this one is just a paranoid check on the behavior of the spec package
+ panic(InvalidDocumentError)
+ }
+
+ defer func() {
+ // errs holds all errors and warnings,
+ // warnings only warnings
+ errs.MergeAsWarnings(warnings)
+ warnings.AddErrors(errs.Warnings...)
+ }()
+
+ // Swagger schema validator
+ schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions)
+ errs.Merge(schv.Validate(obj)) // error -
+ // There may be a point in continuing to try and determine more accurate errors
+ if !s.Options.ContinueOnErrors && errs.HasErrors() {
+ return errs, warnings // no point in continuing
+ }
+
+ errs.Merge(s.validateReferencesValid()) // error -
+ // There may be a point in continuing to try and determine more accurate errors
+ if !s.Options.ContinueOnErrors && errs.HasErrors() {
+ return errs, warnings // no point in continuing
+ }
+
+ errs.Merge(s.validateDuplicateOperationIDs())
+ errs.Merge(s.validateDuplicatePropertyNames()) // error -
+ errs.Merge(s.validateParameters()) // error -
+ errs.Merge(s.validateItems()) // error -
+
+ // Properties in required definition MUST validate their schema
+ // Properties SHOULD NOT be declared as both required and readOnly (warning)
+ errs.Merge(s.validateRequiredDefinitions()) // error and warning
+
+ // There may be a point in continuing to try and determine more accurate errors
+ if !s.Options.ContinueOnErrors && errs.HasErrors() {
+ return errs, warnings // no point in continuing
+ }
+
+ // Values provided as default MUST validate their schema
+ df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
+ errs.Merge(df.Validate())
+
+ // Values provided as examples MUST validate their schema
+ // Value provided as examples in a response without schema generate a warning
+ // Known limitations: examples in responses for mime type not application/json are ignored (warning)
+ ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions}
+ errs.Merge(ex.Validate())
+
+ errs.Merge(s.validateNonEmptyPathParamNames())
+
+ // errs.Merge(s.validateRefNoSibling()) // warning only
+ errs.Merge(s.validateReferenced()) // warning only
+
+ return errs, warnings
+}
+
+func (s *SpecValidator) validateNonEmptyPathParamNames() *Result {
+ res := pools.poolOfResults.BorrowResult()
+ if s.spec.Spec().Paths == nil {
+ // There is no Paths object: error
+ res.AddErrors(noValidPathMsg())
+
+ return res
+ }
+
+ if s.spec.Spec().Paths.Paths == nil {
+ // Paths may be empty: warning
+ res.AddWarnings(noValidPathMsg())
+
+ return res
+ }
+
+ for k := range s.spec.Spec().Paths.Paths {
+ if strings.Contains(k, "{}") {
+ res.AddErrors(emptyPathParameterMsg(k))
+ }
+ }
+
+ return res
+}
+
+func (s *SpecValidator) validateDuplicateOperationIDs() *Result {
+ // OperationID, if specified, must be unique across the board
+ var analyzer *analysis.Spec
+ if s.expanded != nil {
+ // $ref are valid: we can analyze operations on an expanded spec
+ analyzer = analysis.New(s.expanded.Spec())
+ } else {
+ // fallback on possible incomplete picture because of previous errors
+ analyzer = s.analyzer
+ }
+ res := pools.poolOfResults.BorrowResult()
+ known := make(map[string]int)
+ for _, v := range analyzer.OperationIDs() {
+ if v != "" {
+ known[v]++
+ }
+ }
+ for k, v := range known {
+ if v > 1 {
+ res.AddErrors(nonUniqueOperationIDMsg(k, v))
+ }
+ }
+ return res
+}
+
+type dupProp struct {
+ Name string
+ Definition string
+}
+
+func (s *SpecValidator) validateDuplicatePropertyNames() *Result {
+ // definition can't declare a property that's already defined by one of its ancestors
+ res := pools.poolOfResults.BorrowResult()
+ for k, sch := range s.spec.Spec().Definitions {
+ if len(sch.AllOf) == 0 {
+ continue
+ }
+
+ knownanc := map[string]struct{}{
+ "#/definitions/" + k: {},
+ }
+
+ ancs, rec := s.validateCircularAncestry(k, sch, knownanc)
+ if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) {
+ res.Merge(rec)
+ }
+ if len(ancs) > 0 {
+ res.AddErrors(circularAncestryDefinitionMsg(k, ancs))
+ return res
+ }
+
+ knowns := make(map[string]struct{})
+ dups, rep := s.validateSchemaPropertyNames(k, sch, knowns)
+ if rep != nil && (rep.HasErrors() || rep.HasWarnings()) {
+ res.Merge(rep)
+ }
+ if len(dups) > 0 {
+ var pns []string
+ for _, v := range dups {
+ pns = append(pns, v.Definition+"."+v.Name)
+ }
+ res.AddErrors(duplicatePropertiesMsg(k, pns))
+ }
+
+ }
+ return res
+}
+
+func (s *SpecValidator) resolveRef(ref *spec.Ref) (*spec.Schema, error) {
+ if s.spec.SpecFilePath() != "" {
+ return spec.ResolveRefWithBase(s.spec.Spec(), ref, &spec.ExpandOptions{RelativeBase: s.spec.SpecFilePath()})
+ }
+ // NOTE: it looks like with the new spec resolver, this code is now unrecheable
+ return spec.ResolveRef(s.spec.Spec(), ref)
+}
+
+func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, knowns map[string]struct{}) ([]dupProp, *Result) {
+ var dups []dupProp
+
+ schn := nm
+ schc := &sch
+ res := pools.poolOfResults.BorrowResult()
+
+ for schc.Ref.String() != "" {
+ // gather property names
+ reso, err := s.resolveRef(&schc.Ref)
+ if err != nil {
+ errorHelp.addPointerError(res, err, schc.Ref.String(), nm)
+ return dups, res
+ }
+ schc = reso
+ schn = sch.Ref.String()
+ }
+
+ if len(schc.AllOf) > 0 {
+ for _, chld := range schc.AllOf {
+ dup, rep := s.validateSchemaPropertyNames(schn, chld, knowns)
+ if rep != nil && (rep.HasErrors() || rep.HasWarnings()) {
+ res.Merge(rep)
+ }
+ dups = append(dups, dup...)
+ }
+ return dups, res
+ }
+
+ for k := range schc.Properties {
+ _, ok := knowns[k]
+ if ok {
+ dups = append(dups, dupProp{Name: k, Definition: schn})
+ } else {
+ knowns[k] = struct{}{}
+ }
+ }
+
+ return dups, res
+}
+
+func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) {
+ res := pools.poolOfResults.BorrowResult()
+
+ if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there
+ return nil, res
+ }
+ var ancs []string
+
+ schn := nm
+ schc := &sch
+
+ for schc.Ref.String() != "" {
+ reso, err := s.resolveRef(&schc.Ref)
+ if err != nil {
+ errorHelp.addPointerError(res, err, schc.Ref.String(), nm)
+ return ancs, res
+ }
+ schc = reso
+ schn = sch.Ref.String()
+ }
+
+ if schn != nm && schn != "" {
+ if _, ok := knowns[schn]; ok {
+ ancs = append(ancs, schn)
+ }
+ knowns[schn] = struct{}{}
+
+ if len(ancs) > 0 {
+ return ancs, res
+ }
+ }
+
+ if len(schc.AllOf) > 0 {
+ for _, chld := range schc.AllOf {
+ if chld.Ref.String() != "" || len(chld.AllOf) > 0 {
+ anc, rec := s.validateCircularAncestry(schn, chld, knowns)
+ if rec != nil && (rec.HasErrors() || !rec.HasWarnings()) {
+ res.Merge(rec)
+ }
+ ancs = append(ancs, anc...)
+ if len(ancs) > 0 {
+ return ancs, res
+ }
+ }
+ }
+ }
+ return ancs, res
+}
+
+func (s *SpecValidator) validateItems() *Result {
+ // validate parameter, items, schema and response objects for presence of item if type is array
+ res := pools.poolOfResults.BorrowResult()
+
+ for method, pi := range s.analyzer.Operations() {
+ for path, op := range pi {
+ for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+
+ if param.TypeName() == arrayType && param.ItemsTypeName() == "" {
+ res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID))
+ continue
+ }
+ if param.In != swaggerBody {
+ if param.Items != nil {
+ items := param.Items
+ for items.TypeName() == arrayType {
+ if items.ItemsTypeName() == "" {
+ res.AddErrors(arrayInParamRequiresItemsMsg(param.Name, op.ID))
+ break
+ }
+ items = items.Items
+ }
+ }
+ } else {
+ // In: body
+ if param.Schema != nil {
+ res.Merge(s.validateSchemaItems(*param.Schema, fmt.Sprintf("body param %q", param.Name), op.ID))
+ }
+ }
+ }
+
+ var responses []spec.Response
+ if op.Responses != nil {
+ if op.Responses.Default != nil {
+ responses = append(responses, *op.Responses.Default)
+ }
+ if op.Responses.StatusCodeResponses != nil {
+ for _, v := range op.Responses.StatusCodeResponses {
+ responses = append(responses, v)
+ }
+ }
+ }
+
+ for _, resp := range responses {
+ // Response headers with array
+ for hn, hv := range resp.Headers {
+ if hv.TypeName() == arrayType && hv.ItemsTypeName() == "" {
+ res.AddErrors(arrayInHeaderRequiresItemsMsg(hn, op.ID))
+ }
+ }
+ if resp.Schema != nil {
+ res.Merge(s.validateSchemaItems(*resp.Schema, "response body", op.ID))
+ }
+ }
+ }
+ }
+ return res
+}
+
+// Verifies constraints on array type
+func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result {
+ res := pools.poolOfResults.BorrowResult()
+ if !schema.Type.Contains(arrayType) {
+ return res
+ }
+
+ if schema.Items == nil || schema.Items.Len() == 0 {
+ res.AddErrors(arrayRequiresItemsMsg(prefix, opID))
+ return res
+ }
+
+ if schema.Items.Schema != nil {
+ schema = *schema.Items.Schema
+ if _, err := compileRegexp(schema.Pattern); err != nil {
+ res.AddErrors(invalidItemsPatternMsg(prefix, opID, schema.Pattern))
+ }
+
+ res.Merge(s.validateSchemaItems(schema, prefix, opID))
+ }
+ return res
+}
+
+func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result {
+ // Each defined operation path parameters must correspond to a named element in the API's path pattern.
+ // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.)
+ res := pools.poolOfResults.BorrowResult()
+ for _, l := range fromPath {
+ var matched bool
+ for _, r := range fromOperation {
+ if l == "{"+r+"}" {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ res.AddErrors(noParameterInPathMsg(l))
+ }
+ }
+
+ for _, p := range fromOperation {
+ var matched bool
+ for _, r := range fromPath {
+ if "{"+p+"}" == r {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ res.AddErrors(pathParamNotInPathMsg(path, p))
+ }
+ }
+
+ return res
+}
+
+func (s *SpecValidator) validateReferenced() *Result {
+ var res Result
+ res.MergeAsWarnings(s.validateReferencedParameters())
+ res.MergeAsWarnings(s.validateReferencedResponses())
+ res.MergeAsWarnings(s.validateReferencedDefinitions())
+ return &res
+}
+
+func (s *SpecValidator) validateReferencedParameters() *Result {
+ // Each referenceable definition should have references.
+ params := s.spec.Spec().Parameters
+ if len(params) == 0 {
+ return nil
+ }
+
+ expected := make(map[string]struct{})
+ for k := range params {
+ expected["#/parameters/"+jsonpointer.Escape(k)] = struct{}{}
+ }
+ for _, k := range s.analyzer.AllParameterReferences() {
+ delete(expected, k)
+ }
+
+ if len(expected) == 0 {
+ return nil
+ }
+ result := pools.poolOfResults.BorrowResult()
+ for k := range expected {
+ result.AddWarnings(unusedParamMsg(k))
+ }
+ return result
+}
+
+func (s *SpecValidator) validateReferencedResponses() *Result {
+ // Each referenceable definition should have references.
+ responses := s.spec.Spec().Responses
+ if len(responses) == 0 {
+ return nil
+ }
+
+ expected := make(map[string]struct{})
+ for k := range responses {
+ expected["#/responses/"+jsonpointer.Escape(k)] = struct{}{}
+ }
+ for _, k := range s.analyzer.AllResponseReferences() {
+ delete(expected, k)
+ }
+
+ if len(expected) == 0 {
+ return nil
+ }
+ result := pools.poolOfResults.BorrowResult()
+ for k := range expected {
+ result.AddWarnings(unusedResponseMsg(k))
+ }
+ return result
+}
+
+func (s *SpecValidator) validateReferencedDefinitions() *Result {
+ // Each referenceable definition must have references.
+ defs := s.spec.Spec().Definitions
+ if len(defs) == 0 {
+ return nil
+ }
+
+ expected := make(map[string]struct{})
+ for k := range defs {
+ expected["#/definitions/"+jsonpointer.Escape(k)] = struct{}{}
+ }
+ for _, k := range s.analyzer.AllDefinitionReferences() {
+ delete(expected, k)
+ }
+
+ if len(expected) == 0 {
+ return nil
+ }
+
+ result := new(Result)
+ for k := range expected {
+ result.AddWarnings(unusedDefinitionMsg(k))
+ }
+ return result
+}
+
+func (s *SpecValidator) validateRequiredDefinitions() *Result {
+ // Each property listed in the required array must be defined in the properties of the model
+ res := pools.poolOfResults.BorrowResult()
+
+DEFINITIONS:
+ for d, schema := range s.spec.Spec().Definitions {
+ if schema.Required != nil { // Safeguard
+ for _, pn := range schema.Required {
+ red := s.validateRequiredProperties(pn, d, &schema) //#nosec
+ res.Merge(red)
+ if !red.IsValid() && !s.Options.ContinueOnErrors {
+ break DEFINITIONS // there is an error, let's stop that bleeding
+ }
+ }
+ }
+ }
+ return res
+}
+
+// validateRequiredProperties checks that a name listed in "required" is
+// actually defined for the schema: as a regular property, via a
+// patternProperties regexp, or via an additionalProperties schema.
+// It also warns when the matched property is marked readOnly.
+func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result {
+	// Takes care of recursive property definitions, which may be nested in additionalProperties schemas
+	res := pools.poolOfResults.BorrowResult()
+	propertyMatch := false
+	patternMatch := false
+	additionalPropertiesMatch := false
+	isReadOnly := false
+
+	// Regular properties
+	if _, ok := v.Properties[path]; ok {
+		propertyMatch = true
+		isReadOnly = v.Properties[path].ReadOnly
+	}
+
+	// NOTE: patternProperties are not supported in swagger. Even though, we continue validation here
+	// We check all defined patterns: if one regexp is invalid, croaks an error
+	for pp, pv := range v.PatternProperties {
+		re, err := compileRegexp(pp)
+		if err != nil {
+			res.AddErrors(invalidPatternMsg(pp, in))
+		} else if re.MatchString(path) {
+			patternMatch = true
+			// a direct property match takes precedence for the readOnly check
+			if !propertyMatch {
+				isReadOnly = pv.ReadOnly
+			}
+		}
+	}
+
+	if !(propertyMatch || patternMatch) {
+		if v.AdditionalProperties != nil {
+			// free-form additionalProperties (no schema): any required name is acceptable
+			if v.AdditionalProperties.Allows && v.AdditionalProperties.Schema == nil {
+				additionalPropertiesMatch = true
+			} else if v.AdditionalProperties.Schema != nil {
+				// additionalProperties as schema are supported in swagger
+				// recursively validates additionalProperties schema
+				// TODO : anyOf, allOf, oneOf like in schemaPropsValidator
+				red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema)
+				if red.IsValid() {
+					additionalPropertiesMatch = true
+					if !propertyMatch && !patternMatch {
+						isReadOnly = v.AdditionalProperties.Schema.ReadOnly
+					}
+				}
+				res.Merge(red)
+			}
+		}
+	}
+
+	if !(propertyMatch || patternMatch || additionalPropertiesMatch) {
+		res.AddErrors(requiredButNotDefinedMsg(path, in))
+	}
+
+	if isReadOnly {
+		res.AddWarnings(readOnlyAndRequiredMsg(in, path))
+	}
+	return res
+}
+
+// validateParameters runs all parameter-level checks over every operation:
+// path uniqueness (when StrictPathParamUniqueness is set), unique (in, name)
+// pairs, at most one body parameter, formData/body exclusivity, valid
+// patterns, required path params, and path-vs-declared param matching.
+// BUGFIX(review): `&paramSchema` had been mangled into `&para;mSchema` by an
+// HTML-entity decoding pass; restored the address-of expression.
+func (s *SpecValidator) validateParameters() *Result {
+	// - for each method, path is unique, regardless of path parameters
+	//   e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are
+	//   considered duplicate paths, if StrictPathParamUniqueness is enabled.
+	// - each parameter should have a unique `name` and `type` combination
+	// - each operation should have only 1 parameter of type body
+	// - there must be at most 1 parameter in body
+	// - parameters with pattern property must specify valid patterns
+	// - $ref in parameters must resolve
+	// - path param must be required
+	res := pools.poolOfResults.BorrowResult()
+	rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`)
+	// hoisted out of the loops below: the pattern is constant
+	rexGarbledParam := mustCompileRegexp(`{.*[{}\s]+.*}`)
+	for method, pi := range s.expandedAnalyzer().Operations() {
+		methodPaths := make(map[string]map[string]string)
+		for path, op := range pi {
+			if s.Options.StrictPathParamUniqueness {
+				pathToAdd := pathHelp.stripParametersInPath(path)
+
+				// Warn on garbled path after param stripping
+				if rexGarbledPathSegment.MatchString(pathToAdd) {
+					res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd))
+				}
+
+				// Check uniqueness of stripped paths
+				if _, found := methodPaths[method][pathToAdd]; found {
+
+					// Sort names for stable, testable output
+					if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 {
+						res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd]))
+					} else {
+						res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path))
+					}
+				} else {
+					if _, found := methodPaths[method]; !found {
+						methodPaths[method] = map[string]string{}
+					}
+					methodPaths[method][pathToAdd] = path // Original non stripped path
+
+				}
+			}
+
+			var bodyParams []string
+			var paramNames []string
+			var hasForm, hasBody bool
+
+			// Check parameters names uniqueness for operation
+			// TODO: should be done after param expansion
+			res.Merge(s.checkUniqueParams(path, method, op))
+
+			// pick the root schema from the swagger specification which describes a parameter
+			origSchema, ok := s.schema.Definitions["parameter"]
+			if !ok {
+				panic("unexpected swagger schema: missing #/definitions/parameter")
+			}
+			// clone it once to avoid expanding a global schema (e.g. swagger spec)
+			paramSchema, err := deepCloneSchema(origSchema)
+			if err != nil {
+				panic(fmt.Errorf("can't clone schema: %v", err))
+			}
+
+			for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) {
+				// An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation)
+				schv := newSchemaValidator(&paramSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions)
+				obj := swag.ToDynamicJSON(pr)
+				res.Merge(schv.Validate(obj))
+
+				// Validate pattern regexp for parameters with a Pattern property
+				if _, err := compileRegexp(pr.Pattern); err != nil {
+					res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern))
+				}
+
+				// There must be at most one parameter in body: list them all
+				if pr.In == swaggerBody {
+					bodyParams = append(bodyParams, fmt.Sprintf("%q", pr.Name))
+					hasBody = true
+				}
+
+				if pr.In == "path" {
+					paramNames = append(paramNames, pr.Name)
+					// Path declared in path must have the required: true property
+					if !pr.Required {
+						res.AddErrors(pathParamRequiredMsg(op.ID, pr.Name))
+					}
+				}
+
+				if pr.In == "formData" {
+					hasForm = true
+				}
+
+				if !(pr.Type == numberType || pr.Type == integerType) &&
+					(pr.Maximum != nil || pr.Minimum != nil || pr.MultipleOf != nil) {
+					// A non-numeric parameter has validation keywords for numeric instances (number and integer)
+					res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type))
+				}
+
+				if !(pr.Type == stringType) &&
+					// A non-string parameter has validation keywords for strings
+					(pr.MaxLength != nil || pr.MinLength != nil || pr.Pattern != "") {
+					res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type))
+				}
+
+				if !(pr.Type == arrayType) &&
+					// A non-array parameter has validation keywords for arrays
+					(pr.MaxItems != nil || pr.MinItems != nil || pr.UniqueItems) {
+					res.AddWarnings(parameterValidationTypeMismatchMsg(pr.Name, path, pr.Type))
+				}
+			}
+
+			// In:formData and In:body are mutually exclusive
+			if hasBody && hasForm {
+				res.AddErrors(bothFormDataAndBodyMsg(op.ID))
+			}
+			// There must be at most one body param
+			// Accurately report situations when more than 1 body param is declared (possibly unnamed)
+			if len(bodyParams) > 1 {
+				sort.Strings(bodyParams)
+				res.AddErrors(multipleBodyParamMsg(op.ID, bodyParams))
+			}
+
+			// Check uniqueness of parameters in path
+			paramsInPath := pathHelp.extractPathParams(path)
+			for i, p := range paramsInPath {
+				for j, q := range paramsInPath {
+					if p == q && i > j {
+						res.AddErrors(pathParamNotUniqueMsg(path, p, q))
+						break
+					}
+				}
+			}
+
+			// Warns about possible malformed params in path
+			for _, p := range paramsInPath {
+				if rexGarbledParam.MatchString(p) {
+					res.AddWarnings(pathParamGarbledMsg(path, p))
+				}
+			}
+
+			// Match params from path vs params from params section
+			res.Merge(s.validatePathParamPresence(path, paramsInPath, paramNames))
+		}
+	}
+	return res
+}
+
+// validateReferencesValid ensures every $ref in the spec points to a valid
+// URI, then attempts a full expansion of the document; the expanded document
+// is cached on the validator for later phases.
+func (s *SpecValidator) validateReferencesValid() *Result {
+	res := pools.poolOfResults.BorrowResult()
+
+	// each reference must point to a valid object
+	for _, ref := range s.analyzer.AllRefs() {
+		// Safeguard - spec should always yield a valid URI
+		if !ref.IsValidURI(s.spec.SpecFilePath()) {
+			res.AddErrors(invalidRefMsg(ref.String()))
+		}
+	}
+
+	if res.HasErrors() {
+		return res
+	}
+
+	// NOTE: with default settings, loads.Document.Expanded()
+	// stops on first error. Anyhow, the expand option to continue
+	// on errors fails to report errors at all.
+	expanded, err := s.spec.Expanded()
+	if err != nil {
+		res.AddErrors(unresolvedReferencesMsg(err))
+	}
+	s.expanded = expanded
+
+	return res
+}
+
+// checkUniqueParams reports duplicate parameter declarations within one
+// operation's parameters section, keyed on the (in, name) pair.
+func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operation) *Result {
+	// Check for duplicate parameters declaration in param section.
+	// Each parameter should have a unique `name` and `type` combination
+	// NOTE: this could be factorized in analysis (when constructing the params map)
+	// However, there are some issues with such a factorization:
+	// - analysis does not seem to fully expand params
+	// - param keys may be altered by x-go-name
+	res := pools.poolOfResults.BorrowResult()
+	pnames := make(map[string]struct{})
+
+	if op.Parameters != nil { // Safeguard
+		for _, ppr := range op.Parameters {
+			var ok bool
+			pr, red := paramHelp.resolveParam(path, method, op.ID, &ppr, s) //#nosec
+			res.Merge(red)
+
+			if pr != nil && pr.Name != "" { // params with an empty name do not participate in the check
+				// key on (in, name): the same name may legally appear in
+				// different locations (e.g. query vs path)
+				key := fmt.Sprintf("%s#%s", pr.In, pr.Name)
+
+				if _, ok = pnames[key]; ok {
+					res.AddErrors(duplicateParamNameMsg(pr.In, pr.Name, op.ID))
+				}
+				pnames[key] = struct{}{}
+			}
+		}
+	}
+	return res
+}
+
+// SetContinueOnErrors sets the ContinueOnErrors option for this validator.
+// When true, validation proceeds past the first error so that as many issues
+// as possible are reported in a single pass.
+func (s *SpecValidator) SetContinueOnErrors(c bool) {
+	s.Options.ContinueOnErrors = c
+}
+
+// expandedAnalyzer returns the analyzer of the expanded spec when expansion
+// succeeded, falling back to the analyzer of the raw spec otherwise.
+func (s *SpecValidator) expandedAnalyzer() *analysis.Spec {
+	if s.expanded == nil || s.expanded.Analyzer == nil {
+		return s.analyzer
+	}
+
+	return s.expanded.Analyzer
+}
+
+// deepCloneSchema returns a deep copy of src by round-tripping it through
+// encoding/gob, so the copy shares no mutable state with the original.
+func deepCloneSchema(src spec.Schema) (spec.Schema, error) {
+	var (
+		buf bytes.Buffer
+		dst spec.Schema
+	)
+
+	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
+		return spec.Schema{}, err
+	}
+	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
+		return spec.Schema{}, err
+	}
+
+	return dst, nil
+}
diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go
new file mode 100644
index 00000000..6d1f0f81
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/spec_messages.go
@@ -0,0 +1,366 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "net/http"
+
+ "github.com/go-openapi/errors"
+)
+
+// Error messages related to spec validation and returned as results.
+const (
+	// ArrayRequiresItemsError ...
+	ArrayRequiresItemsError = "%s for %q is a collection without an element type (array requires items definition)"
+
+	// ArrayInParamRequiresItemsError ...
+	ArrayInParamRequiresItemsError = "param %q for %q is a collection without an element type (array requires item definition)"
+
+	// ArrayInHeaderRequiresItemsError ...
+	ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)"
+
+	// BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden
+	BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation"
+
+	// CannotResolveReferenceError when a $ref could not be resolved
+	CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v"
+
+	// CircularAncestryDefinitionError ...
+	CircularAncestryDefinitionError = "definition %q has circular ancestry: %v"
+
+	// DefaultValueDoesNotValidateError results from an invalid default value provided
+	DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema"
+
+	// DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items
+	DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema"
+
+	// DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header
+	DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema"
+
+	// DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items
+	DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema"
+
+	// DefaultValueInDoesNotValidateError ...
+	DefaultValueInDoesNotValidateError = "in operation %q, default value in %s does not validate its schema"
+
+	// DuplicateParamNameError ...
+	DuplicateParamNameError = "duplicate parameter name %q for %q in operation %q"
+
+	// DuplicatePropertiesError ...
+	DuplicatePropertiesError = "definition %q contains duplicate properties: %v"
+
+	// ExampleValueDoesNotValidateError results from an invalid example value provided
+	ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema"
+
+	// ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items
+	ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema"
+
+	// ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header
+	ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema"
+
+	// ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items
+	ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema"
+
+	// ExampleValueInDoesNotValidateError ...
+	ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema"
+
+	// EmptyPathParameterError means that a path parameter was found empty (e.g. "{}")
+	EmptyPathParameterError = "%q contains an empty path parameter"
+
+	// InvalidDocumentError states that spec validation only processes spec.Document objects
+	InvalidDocumentError = "spec validator can only validate spec.Document objects"
+
+	// InvalidItemsPatternError indicates an Items definition with invalid pattern
+	InvalidItemsPatternError = "%s for %q has invalid items pattern: %q"
+
+	// InvalidParameterDefinitionError indicates an error detected on a parameter definition
+	InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q"
+
+	// InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken with a schema definition.
+	// Most likely, this situation is encountered whenever a $ref has been added as a sibling of the parameter definition.
+	InvalidParameterDefinitionAsSchemaError = "invalid definition as Schema for parameter %s in %s in operation %q"
+
+	// InvalidPatternError ...
+	InvalidPatternError = "pattern %q is invalid in %s"
+
+	// InvalidPatternInError indicates an invalid pattern in a schema or items definition
+	InvalidPatternInError = "%s in %s has invalid pattern: %q"
+
+	// InvalidPatternInHeaderError indicates a header definition with an invalid pattern
+	InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v"
+
+	// InvalidPatternInParamError ...
+	InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q"
+
+	// InvalidReferenceError indicates that a $ref property could not be resolved
+	InvalidReferenceError = "invalid ref %q"
+
+	// InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken with a schema definition.
+	// Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition.
+	InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s"
+
+	// MultipleBodyParamError indicates that an operation specifies multiple parameter with in: body
+	MultipleBodyParamError = "operation %q has more than 1 body param: %v"
+
+	// NonUniqueOperationIDError indicates that the same operationId has been specified several times
+	NonUniqueOperationIDError = "%q is defined %d times"
+
+	// NoParameterInPathError indicates that a path was found without any parameter
+	NoParameterInPathError = "path param %q has no parameter definition"
+
+	// NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning.
+	NoValidPathErrorOrWarning = "spec has no valid path defined"
+
+	// NoValidResponseError indicates that no valid response description could be found for an operation
+	NoValidResponseError = "operation %q has no valid response"
+
+	// PathOverlapError ...
+	PathOverlapError = "path %s overlaps with %s"
+
+	// PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification
+	PathParamNotInPathError = "path param %q is not present in path %q"
+
+	// PathParamNotUniqueError ...
+	PathParamNotUniqueError = "params in path %q must be unique: %q conflicts with %q"
+
+	// PathParamRequiredError ...
+	// NOTE(review): the template is missing a space after the comma
+	// ("%q,path param") - kept as-is so the emitted message is unchanged.
+	PathParamRequiredError = "in operation %q,path param %q must be declared as required"
+
+	// RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger
+	// NOTE(review): "IMPORTANT!in" is missing a space - kept as-is to preserve
+	// the emitted message.
+	RefNotAllowedInHeaderError = "IMPORTANT!in %q: $ref are not allowed in headers. In context for header %q%s"
+
+	// RequiredButNotDefinedError ...
+	RequiredButNotDefinedError = "%q is present in required but not defined as property in definition %q"
+
+	// SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on
+	SomeParametersBrokenError = "some parameters definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s"
+
+	// UnresolvedReferencesError indicates that at least one $ref could not be resolved
+	UnresolvedReferencesError = "some references could not be resolved in spec. First found: %v"
+)
+
+// Warning messages related to spec validation and returned as results
+const (
+	// ExamplesWithoutSchemaWarning indicates that examples are provided for a response, but no schema to validate the example against
+	ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s"
+
+	// ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type different than application/json, which
+	// the validator does not support yet
+	ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s"
+
+	// PathParamGarbledWarning ...
+	// NOTE(review): message contains typos ("stricly", "no what you want");
+	// kept as-is since changing the emitted text could break consumers that match it.
+	PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. Albeit not stricly illegal, this is probably no what you want"
+
+	// ParamValidationTypeMismatch indicates that parameter has validation which does not match its type
+	ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s"
+
+	// PathStrippedParamGarbledWarning ...
+	// NOTE(review): same "no what you want" typo as PathParamGarbledWarning; kept as-is.
+	PathStrippedParamGarbledWarning = "path stripped from path parameters %s contains {,} or white space. This is probably no what you want."
+
+	// ReadOnlyAndRequiredWarning ...
+	ReadOnlyAndRequiredWarning = "Required property %s in %q should not be marked as both required and readOnly"
+
+	// RefShouldNotHaveSiblingsWarning indicates that a $ref was found with a sibling definition. This results in the $ref taking over its siblings,
+	// which is most likely not wanted.
+	RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s"
+
+	// RequiredHasDefaultWarning indicates that a required parameter property should not have a default
+	RequiredHasDefaultWarning = "%s in %s has a default value and is required as parameter"
+
+	// UnusedDefinitionWarning ...
+	UnusedDefinitionWarning = "definition %q is not used anywhere"
+
+	// UnusedParamWarning ...
+	UnusedParamWarning = "parameter %q is not used anywhere"
+
+	// UnusedResponseWarning ...
+	UnusedResponseWarning = "response %q is not used anywhere"
+
+	// InvalidObject indicates that an object-typed value was expected
+	InvalidObject = "expected an object in %q.%s"
+)
+
+// Additional error codes
+const (
+ // InternalErrorCode reports an internal technical error
+ InternalErrorCode = http.StatusInternalServerError
+ // NotFoundErrorCode indicates that a resource (e.g. a $ref) could not be found
+ NotFoundErrorCode = http.StatusNotFound
+)
+
+// Message builders: turn the message templates above into errors.Error
+// values. Spec-level issues use errors.CompositeErrorCode unless a more
+// specific code applies (internal error, not found).
+
+func invalidDocumentMsg() errors.Error {
+	return errors.New(InternalErrorCode, InvalidDocumentError)
+}
+func invalidRefMsg(path string) errors.Error {
+	return errors.New(NotFoundErrorCode, InvalidReferenceError, path)
+}
+func unresolvedReferencesMsg(err error) errors.Error {
+	return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err)
+}
+func noValidPathMsg() errors.Error {
+	return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning)
+}
+func emptyPathParameterMsg(path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path)
+}
+func nonUniqueOperationIDMsg(path string, i int) errors.Error {
+	return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i)
+}
+func circularAncestryDefinitionMsg(path string, args interface{}) errors.Error {
+	return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args)
+}
+func duplicatePropertiesMsg(path string, args interface{}) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args)
+}
+func pathParamNotInPathMsg(path, param string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path)
+}
+func arrayRequiresItemsMsg(path, operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation)
+}
+func arrayInParamRequiresItemsMsg(path, operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation)
+}
+func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation)
+}
+func invalidItemsPatternMsg(path, operation, pattern string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern)
+}
+func invalidPatternMsg(pattern, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path)
+}
+func requiredButNotDefinedMsg(path, definition string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition)
+}
+func pathParamGarbledMsg(path, param string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param)
+}
+func pathStrippedParamGarbledMsg(path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path)
+}
+func pathOverlapMsg(path, arg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg)
+}
+func invalidPatternInParamMsg(operation, param, pattern string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern)
+}
+func pathParamRequiredMsg(operation, param string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param)
+}
+func bothFormDataAndBodyMsg(operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation)
+}
+func multipleBodyParamMsg(operation string, args interface{}) errors.Error {
+	return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args)
+}
+func pathParamNotUniqueMsg(path, param, arg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, PathParamNotUniqueError, path, param, arg)
+}
+// NOTE: the template expects the parameter name first, hence the swapped
+// (param, path) argument order below.
+func duplicateParamNameMsg(path, param, operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation)
+}
+func unusedParamMsg(arg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg)
+}
+func unusedDefinitionMsg(arg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg)
+}
+func unusedResponseMsg(arg string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg)
+}
+func readOnlyAndRequiredMsg(path, param string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path)
+}
+func noParameterInPathMsg(param string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param)
+}
+func requiredHasDefaultMsg(param, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path)
+}
+func defaultValueDoesNotValidateMsg(param, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path)
+}
+func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path)
+}
+func noValidResponseMsg(operation string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation)
+}
+func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path)
+}
+func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path)
+}
+func invalidPatternInHeaderMsg(operation, header, path, pattern string, args interface{}) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args)
+}
+func invalidPatternInMsg(path, in, pattern string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern)
+}
+func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path)
+}
+func exampleValueDoesNotValidateMsg(param, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path)
+}
+func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path)
+}
+func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path)
+}
+func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path)
+}
+func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path)
+}
+func examplesWithoutSchemaMsg(operation, response string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExamplesWithoutSchemaWarning, operation, response)
+}
+func examplesMimeNotSupportedMsg(operation, response string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response)
+}
+func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref)
+}
+func cannotResolveRefMsg(path, ref string, err error) errors.Error {
+	return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err)
+}
+func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID)
+}
+func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, method, operationID)
+}
+func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ)
+}
+func invalidObjectMsg(path, in string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, InvalidObject, path, in)
+}
+
+// disabled
+//
+// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error {
+// 	return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method)
+// }
+func someParametersBrokenMsg(path, method, operationID string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID)
+}
+func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error {
+	return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path)
+}
diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go
new file mode 100644
index 00000000..f87abb3d
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/type.go
@@ -0,0 +1,213 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// typeValidator checks that a value's JSON type (and optionally its format)
+// matches the type/format declared by the spec. It applies to Schema,
+// Parameter and Header objects (see Applies).
+type typeValidator struct {
+	Path     string             // location reported in error messages
+	In       string             // where the value sits (e.g. body, query)
+	Type     spec.StringOrArray // allowed JSON type(s)
+	Nullable bool               // whether null is accepted
+	Format   string             // optional swagger format (e.g. date-time)
+	Options  *SchemaValidatorOptions
+}
+
+// newTypeValidator returns a configured typeValidator, either freshly
+// allocated or borrowed from the validator pool when recycling is enabled.
+func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator {
+	if opts == nil {
+		opts = new(SchemaValidatorOptions)
+	}
+
+	var v *typeValidator
+	if opts.recycleValidators {
+		v = pools.poolOfTypeValidators.BorrowValidator()
+	} else {
+		v = &typeValidator{}
+	}
+
+	// (re)initialize every field: a pooled validator may carry stale state
+	v.Path, v.In, v.Type = path, in, typ
+	v.Nullable, v.Format, v.Options = nullable, format, opts
+
+	return v
+}
+
+// schemaInfoForType maps a Go value to its (JSON type, swagger format) pair.
+// Returns ("", "") when no mapping is known for the value's type.
+func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) {
+	// internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions),
+	// see https://github.com/go-openapi/strfmt/blob/master/README.md
+	// TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt.
+	switch data.(type) {
+	case []byte, strfmt.Base64, *strfmt.Base64:
+		return stringType, stringFormatByte
+	case strfmt.CreditCard, *strfmt.CreditCard:
+		return stringType, stringFormatCreditCard
+	case strfmt.Date, *strfmt.Date:
+		return stringType, stringFormatDate
+	case strfmt.DateTime, *strfmt.DateTime:
+		return stringType, stringFormatDateTime
+	case strfmt.Duration, *strfmt.Duration:
+		return stringType, stringFormatDuration
+	case swag.File, *swag.File:
+		return fileType, ""
+	case strfmt.Email, *strfmt.Email:
+		return stringType, stringFormatEmail
+	case strfmt.HexColor, *strfmt.HexColor:
+		return stringType, stringFormatHexColor
+	case strfmt.Hostname, *strfmt.Hostname:
+		return stringType, stringFormatHostname
+	case strfmt.IPv4, *strfmt.IPv4:
+		return stringType, stringFormatIPv4
+	case strfmt.IPv6, *strfmt.IPv6:
+		return stringType, stringFormatIPv6
+	case strfmt.ISBN, *strfmt.ISBN:
+		return stringType, stringFormatISBN
+	case strfmt.ISBN10, *strfmt.ISBN10:
+		return stringType, stringFormatISBN10
+	case strfmt.ISBN13, *strfmt.ISBN13:
+		return stringType, stringFormatISBN13
+	case strfmt.MAC, *strfmt.MAC:
+		return stringType, stringFormatMAC
+	case strfmt.ObjectId, *strfmt.ObjectId:
+		return stringType, stringFormatBSONObjectID
+	case strfmt.Password, *strfmt.Password:
+		return stringType, stringFormatPassword
+	case strfmt.RGBColor, *strfmt.RGBColor:
+		return stringType, stringFormatRGBColor
+	case strfmt.SSN, *strfmt.SSN:
+		return stringType, stringFormatSSN
+	case strfmt.URI, *strfmt.URI:
+		return stringType, stringFormatURI
+	case strfmt.UUID, *strfmt.UUID:
+		return stringType, stringFormatUUID
+	case strfmt.UUID3, *strfmt.UUID3:
+		return stringType, stringFormatUUID3
+	case strfmt.UUID4, *strfmt.UUID4:
+		return stringType, stringFormatUUID4
+	case strfmt.UUID5, *strfmt.UUID5:
+		return stringType, stringFormatUUID5
+	// TODO: missing binary (io.ReadCloser)
+	// TODO: missing json.Number
+	default:
+		// fall back on reflection for plain Go kinds
+		val := reflect.ValueOf(data)
+		tpe := val.Type()
+		switch tpe.Kind() { //nolint:exhaustive
+		case reflect.Bool:
+			return booleanType, ""
+		case reflect.String:
+			return stringType, ""
+		case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+			// NOTE: that is the spec. With go-openapi, is that not uint32 for unsigned integers?
+			return integerType, integerFormatInt32
+		case reflect.Int, reflect.Int64, reflect.Uint, reflect.Uint64:
+			return integerType, integerFormatInt64
+		case reflect.Float32:
+			// NOTE: is that not numberFormatFloat?
+			return numberType, numberFormatFloat32
+		case reflect.Float64:
+			// NOTE: is that not "double"?
+			return numberType, numberFormatFloat64
+		// NOTE: go arrays (reflect.Array) are not supported (fixed length)
+		case reflect.Slice:
+			return arrayType, ""
+		case reflect.Map, reflect.Struct:
+			return objectType, ""
+		case reflect.Interface:
+			// What to do here?
+			panic("dunno what to do here")
+		case reflect.Ptr:
+			// unwrap the pointer and recurse on the pointed-to value
+			return t.schemaInfoForType(reflect.Indirect(val).Interface())
+		}
+	}
+	return "", ""
+}
+
+// SetPath sets the path (location in the spec) reported in error messages.
+func (t *typeValidator) SetPath(path string) {
+	t.Path = path
+}
+
+// Applies reports whether this validator is relevant for the given source
+// object: it only handles spec.Schema, spec.Parameter and spec.Header, and
+// only when a type or format constraint is actually declared.
+func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool {
+	switch source.(type) {
+	case *spec.Schema, *spec.Parameter, *spec.Header:
+		return len(t.Type) > 0 || t.Format != ""
+	default:
+		return false
+	}
+}
+
+// Validate checks data against the declared type/format. Numeric coercions
+// (int32 into int64, float32 into float64, JSON-integral floats into integer)
+// are tolerated; strings and slices get format-specific leniency below.
+func (t *typeValidator) Validate(data interface{}) *Result {
+	if t.Options.recycleValidators {
+		defer func() {
+			t.redeem()
+		}()
+	}
+
+	if data == nil {
+		// nil or zero value for the passed structure require Type: null
+		if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this
+			return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult)
+		}
+
+		return emptyResult
+	}
+
+	// check if the type matches, should be used in every validator chain as first item
+	val := reflect.Indirect(reflect.ValueOf(data))
+	kind := val.Kind()
+
+	// infer schema type (JSON) and format from passed data type
+	schType, format := t.schemaInfoForType(data)
+
+	// check numerical types
+	// TODO: check unsigned ints
+	// TODO: check json.Number (see schema.go)
+	isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32
+	isLowerFloat := t.Format == numberFormatFloat64 && format == numberFormatFloat32
+	isFloatInt := schType == numberType && swag.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType)
+	isIntFloat := schType == integerType && t.Type.Contains(numberType)
+
+	// non-string, non-slice values with a declared format must match it
+	// (modulo the numeric widenings computed above)
+	if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) {
+		// TODO: test case
+		return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult)
+	}
+
+	// string/slice values with a non-numeric declared format are accepted
+	// here; format validation is deferred to the format validator
+	if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) {
+		return emptyResult
+	}
+
+	// finally, the inferred JSON type must be one of the declared types
+	if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) {
+		return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult)
+	}
+
+	return emptyResult
+}
+
+// redeem returns this validator to the shared pool for reuse.
+func (t *typeValidator) redeem() {
+	pools.poolOfTypeValidators.RedeemValidator(t)
+}
diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh
new file mode 100644
index 00000000..21b06e2b
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Refresh the local JSON Schema (draft 4) test fixtures from the official
+# json-schema-org test suite repository.
+
+set -eu -o pipefail
+# Repository root: fixture destinations are resolved relative to it.
+dir=$(git rev-parse --show-toplevel)
+# Temporary working directory for the clone; removed on exit.
+scratch=$(mktemp -d -t tmp.XXXXXXXXXX)
+
+# Remove the scratch directory on normal exit or interruption.
+function finish {
+ rm -rf "$scratch"
+}
+trap finish EXIT SIGHUP SIGINT SIGTERM
+
+cd "$scratch"
+git clone https://github.com/json-schema-org/JSON-Schema-Test-Suite Suite
+# Copy the draft-4 test cases and the shared "remotes" documents into fixtures.
+cp -r Suite/tests/draft4/* "$dir/fixtures/jsonschema_suite"
+cp -a Suite/remotes "$dir/fixtures/jsonschema_suite"
diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go
new file mode 100644
index 00000000..c083aecc
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/validator.go
@@ -0,0 +1,1051 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// An EntityValidator is an interface for things that can validate entities
+type EntityValidator interface {
+ Validate(interface{}) *Result
+}
+
+// valueValidator is one link in a chain of scalar/collection validators.
+// SetPath rebases the path reported in errors, Applies reports whether the
+// validator is relevant for the given source spec object and data kind,
+// and Validate performs the actual check.
+type valueValidator interface {
+ SetPath(path string)
+ Applies(interface{}, reflect.Kind) bool
+ Validate(interface{}) *Result
+}
+
+// itemsValidator validates the individual elements of an array parameter or
+// header against the constraints described by a spec.Items object.
+type itemsValidator struct {
+ items *spec.Items
+ root interface{}
+ path string
+ in string
+ validators [6]valueValidator
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+// newItemsValidator builds an itemsValidator for the given spec.Items.
+// When opts.recycleValidators is set, the validator and its children are
+// borrowed from pools and must be redeemed after use (Validate does this).
+func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var iv *itemsValidator
+ if opts.recycleValidators {
+ iv = pools.poolOfItemsValidators.BorrowValidator()
+ } else {
+ iv = new(itemsValidator)
+ }
+
+ iv.path = path
+ iv.in = in
+ iv.items = items
+ iv.root = root
+ iv.KnownFormats = formats
+ iv.Options = opts
+ // chain of value validators applied to every element;
+ // the type check runs first.
+ iv.validators = [6]valueValidator{
+ iv.typeValidator(),
+ iv.stringValidator(),
+ iv.formatValidator(),
+ iv.numberValidator(),
+ iv.sliceValidator(),
+ iv.commonValidator(),
+ }
+ return iv
+}
+
+// Validate checks the element at the given index against the items spec.
+// index is used only to build the reported error path ("<path>.<index>").
+// When validators are recycled, this validator and all remaining children
+// are returned to their pools before Validate returns.
+func (i *itemsValidator) Validate(index int, data interface{}) *Result {
+ if i.Options.recycleValidators {
+ defer func() {
+ i.redeemChildren()
+ i.redeem()
+ }()
+ }
+
+ tpe := reflect.TypeOf(data)
+ kind := tpe.Kind()
+ var result *Result
+ if i.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
+ path := fmt.Sprintf("%s.%d", i.path, index)
+
+ for idx, validator := range i.validators {
+ if !validator.Applies(i.root, kind) {
+ if i.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
+ continue
+ }
+
+ validator.SetPath(path)
+ err := validator.Validate(data)
+ if i.Options.recycleValidators {
+ i.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ result.Inc()
+ if err.HasErrors() {
+ result.Merge(err)
+
+ // stop the chain at the first hard error
+ break
+ }
+
+ result.Merge(err)
+ }
+ }
+
+ return result
+}
+
+// typeValidator builds the per-element type/format check.
+func (i *itemsValidator) typeValidator() valueValidator {
+ return newTypeValidator(
+ i.path,
+ i.in,
+ spec.StringOrArray([]string{i.items.Type}),
+ i.items.Nullable,
+ i.items.Format,
+ i.Options,
+ )
+}
+
+// commonValidator builds the enum/default check for elements.
+// The path is set later via SetPath, hence the empty string here.
+func (i *itemsValidator) commonValidator() valueValidator {
+ return newBasicCommonValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.Enum,
+ i.Options,
+ )
+}
+
+// sliceValidator builds the check for nested array elements
+// (maxItems, minItems, uniqueItems and recursive items).
+func (i *itemsValidator) sliceValidator() valueValidator {
+ return newBasicSliceValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MaxItems,
+ i.items.MinItems,
+ i.items.UniqueItems,
+ i.items.Items,
+ i.root,
+ i.KnownFormats,
+ i.Options,
+ )
+}
+
+// numberValidator builds the numeric range/multipleOf check for elements.
+func (i *itemsValidator) numberValidator() valueValidator {
+ return newNumberValidator(
+ "",
+ i.in,
+ i.items.Default,
+ i.items.MultipleOf,
+ i.items.Maximum,
+ i.items.ExclusiveMaximum,
+ i.items.Minimum,
+ i.items.ExclusiveMinimum,
+ i.items.Type,
+ i.items.Format,
+ i.Options,
+ )
+}
+
+// stringValidator builds the string length/pattern check for elements.
+// Elements are never individually required nor allowed to be empty.
+func (i *itemsValidator) stringValidator() valueValidator {
+ return newStringValidator(
+ "",
+ i.in,
+ i.items.Default,
+ false, // Required
+ false, // AllowEmpty
+ i.items.MaxLength,
+ i.items.MinLength,
+ i.items.Pattern,
+ i.Options,
+ )
+}
+
+// formatValidator builds the strfmt-based format check for elements.
+func (i *itemsValidator) formatValidator() valueValidator {
+ return newFormatValidator(
+ "",
+ i.in,
+ i.items.Format,
+ i.KnownFormats,
+ i.Options,
+ )
+}
+
+// redeem returns this validator to its pool.
+func (i *itemsValidator) redeem() {
+ pools.poolOfItemsValidators.RedeemValidator(i)
+}
+
+// redeemChildren returns any child validators that were not already
+// relinquished during Validate back to their pools.
+func (i *itemsValidator) redeemChildren() {
+ for idx, validator := range i.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ i.validators[idx] = nil // free up allocated children if not in pool
+ }
+}
+
+// basicCommonValidator applies the validations shared by parameters,
+// schemas and headers: currently the enum membership check.
+type basicCommonValidator struct {
+ Path string
+ In string
+ Default interface{}
+ Enum []interface{}
+ Options *SchemaValidatorOptions
+}
+
+// newBasicCommonValidator builds a basicCommonValidator, borrowing it from
+// a pool when opts.recycleValidators is set.
+func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var b *basicCommonValidator
+ if opts.recycleValidators {
+ b = pools.poolOfBasicCommonValidators.BorrowValidator()
+ } else {
+ b = new(basicCommonValidator)
+ }
+
+ b.Path = path
+ b.In = in
+ b.Default = def
+ b.Enum = enum
+ b.Options = opts
+
+ return b
+}
+
+// SetPath rebases the path reported in validation errors.
+func (b *basicCommonValidator) SetPath(path string) {
+ b.Path = path
+}
+
+// Applies reports whether this validator is relevant for the source spec
+// object; the data kind is ignored.
+func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool {
+ switch source.(type) {
+ case *spec.Parameter, *spec.Schema, *spec.Header:
+ return true
+ default:
+ return false
+ }
+}
+
+// Validate checks that data is a member of the enum (if any), attempting a
+// type conversion to each enum value's type before comparing.
+// A nil result means success.
+func (b *basicCommonValidator) Validate(data interface{}) (res *Result) {
+ if b.Options.recycleValidators {
+ defer func() {
+ b.redeem()
+ }()
+ }
+
+ if len(b.Enum) == 0 {
+ return nil
+ }
+
+ for _, enumValue := range b.Enum {
+ actualType := reflect.TypeOf(enumValue)
+ if actualType == nil { // Safeguard
+ continue
+ }
+
+ expectedValue := reflect.ValueOf(data)
+ if expectedValue.IsValid() &&
+ expectedValue.Type().ConvertibleTo(actualType) &&
+ reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
+ return nil
+ }
+ }
+
+ return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult)
+}
+
+// redeem returns this validator to its pool.
+func (b *basicCommonValidator) redeem() {
+ pools.poolOfBasicCommonValidators.RedeemValidator(b)
+}
+
+// A HeaderValidator has very limited subset of validations to apply
+type HeaderValidator struct {
+ name string
+ header *spec.Header
+ validators [6]valueValidator
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+// NewHeaderValidator creates a new header validator object
+func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newHeaderValidator(name, header, formats, opts)
+}
+
+// newHeaderValidator is the pool-aware constructor backing NewHeaderValidator.
+func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *HeaderValidator
+ if opts.recycleValidators {
+ p = pools.poolOfHeaderValidators.BorrowValidator()
+ } else {
+ p = new(HeaderValidator)
+ }
+
+ p.name = name
+ p.header = header
+ p.KnownFormats = formats
+ p.Options = opts
+ // chain of value validators; the type check runs first.
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ name,
+ "header",
+ spec.StringOrArray([]string{header.Type}),
+ header.Nullable,
+ header.Format,
+ p.Options,
+ ),
+ p.stringValidator(),
+ p.formatValidator(),
+ p.numberValidator(),
+ p.sliceValidator(),
+ p.commonValidator(),
+ }
+
+ return p
+}
+
+// Validate the value of the header against its schema
+func (p *HeaderValidator) Validate(data interface{}) *Result {
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
+ // nil header values are not validated
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
+ tpe := reflect.TypeOf(data)
+ kind := tpe.Kind()
+
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.header, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ // stop the chain at the first hard error
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
+ }
+ }
+
+ return result
+}
+
+// commonValidator builds the enum/default check ("response" scope).
+func (p *HeaderValidator) commonValidator() valueValidator {
+ return newBasicCommonValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.Enum,
+ p.Options,
+ )
+}
+
+// sliceValidator builds the array constraints check for the header value.
+func (p *HeaderValidator) sliceValidator() valueValidator {
+ return newBasicSliceValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MaxItems,
+ p.header.MinItems,
+ p.header.UniqueItems,
+ p.header.Items,
+ p.header,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+// numberValidator builds the numeric range/multipleOf check.
+func (p *HeaderValidator) numberValidator() valueValidator {
+ return newNumberValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ p.header.MultipleOf,
+ p.header.Maximum,
+ p.header.ExclusiveMaximum,
+ p.header.Minimum,
+ p.header.ExclusiveMinimum,
+ p.header.Type,
+ p.header.Format,
+ p.Options,
+ )
+}
+
+// stringValidator builds the string length/pattern check.
+// Headers are always treated as required and never allow empty values.
+func (p *HeaderValidator) stringValidator() valueValidator {
+ return newStringValidator(
+ p.name,
+ "response",
+ p.header.Default,
+ true, // Required
+ false, // AllowEmpty
+ p.header.MaxLength,
+ p.header.MinLength,
+ p.header.Pattern,
+ p.Options,
+ )
+}
+
+// formatValidator builds the strfmt-based format check.
+func (p *HeaderValidator) formatValidator() valueValidator {
+ return newFormatValidator(
+ p.name,
+ "response",
+ p.header.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+// redeem returns this validator to its pool.
+func (p *HeaderValidator) redeem() {
+ pools.poolOfHeaderValidators.RedeemValidator(p)
+}
+
+// redeemChildren returns any child validators that were not already
+// relinquished during Validate back to their pools.
+func (p *HeaderValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
+ }
+}
+
+// A ParamValidator has very limited subset of validations to apply
+type ParamValidator struct {
+ param *spec.Parameter
+ validators [6]valueValidator
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+// NewParamValidator creates a new param validator object
+func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator {
+ opts := new(SchemaValidatorOptions)
+ for _, o := range options {
+ o(opts)
+ }
+
+ return newParamValidator(param, formats, opts)
+}
+
+// newParamValidator is the pool-aware constructor backing NewParamValidator.
+func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var p *ParamValidator
+ if opts.recycleValidators {
+ p = pools.poolOfParamValidators.BorrowValidator()
+ } else {
+ p = new(ParamValidator)
+ }
+
+ p.param = param
+ p.KnownFormats = formats
+ p.Options = opts
+ // chain of value validators; the type check runs first.
+ p.validators = [6]valueValidator{
+ newTypeValidator(
+ param.Name,
+ param.In,
+ spec.StringOrArray([]string{param.Type}),
+ param.Nullable,
+ param.Format,
+ p.Options,
+ ),
+ p.stringValidator(),
+ p.formatValidator(),
+ p.numberValidator(),
+ p.sliceValidator(),
+ p.commonValidator(),
+ }
+
+ return p
+}
+
+// Validate the data against the description of the parameter
+func (p *ParamValidator) Validate(data interface{}) *Result {
+ // nil values are not validated
+ if data == nil {
+ return nil
+ }
+
+ var result *Result
+ if p.Options.recycleResult {
+ result = pools.poolOfResults.BorrowResult()
+ } else {
+ result = new(Result)
+ }
+
+ tpe := reflect.TypeOf(data)
+ kind := tpe.Kind()
+
+ if p.Options.recycleValidators {
+ defer func() {
+ p.redeemChildren()
+ p.redeem()
+ }()
+ }
+
+ // TODO: validate type
+ for idx, validator := range p.validators {
+ if !validator.Applies(p.param, kind) {
+ if p.Options.recycleValidators {
+ // Validate won't be called, so relinquish this validator
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+
+ continue
+ }
+
+ err := validator.Validate(data)
+ if p.Options.recycleValidators {
+ p.validators[idx] = nil // prevents further (unsafe) usage
+ }
+ if err != nil {
+ if err.HasErrors() {
+ // stop the chain at the first hard error
+ result.Merge(err)
+ break
+ }
+ result.Merge(err)
+ }
+ }
+
+ return result
+}
+
+// commonValidator builds the enum/default check for the parameter.
+func (p *ParamValidator) commonValidator() valueValidator {
+ return newBasicCommonValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Enum,
+ p.Options,
+ )
+}
+
+// sliceValidator builds the array constraints check for the parameter.
+func (p *ParamValidator) sliceValidator() valueValidator {
+ return newBasicSliceValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MaxItems,
+ p.param.MinItems,
+ p.param.UniqueItems,
+ p.param.Items,
+ p.param,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+// numberValidator builds the numeric range/multipleOf check.
+func (p *ParamValidator) numberValidator() valueValidator {
+ return newNumberValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.MultipleOf,
+ p.param.Maximum,
+ p.param.ExclusiveMaximum,
+ p.param.Minimum,
+ p.param.ExclusiveMinimum,
+ p.param.Type,
+ p.param.Format,
+ p.Options,
+ )
+}
+
+// stringValidator builds the string length/pattern check, honoring the
+// parameter's Required and AllowEmptyValue settings.
+func (p *ParamValidator) stringValidator() valueValidator {
+ return newStringValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Default,
+ p.param.Required,
+ p.param.AllowEmptyValue,
+ p.param.MaxLength,
+ p.param.MinLength,
+ p.param.Pattern,
+ p.Options,
+ )
+}
+
+// formatValidator builds the strfmt-based format check.
+func (p *ParamValidator) formatValidator() valueValidator {
+ return newFormatValidator(
+ p.param.Name,
+ p.param.In,
+ p.param.Format,
+ p.KnownFormats,
+ p.Options,
+ )
+}
+
+// redeem returns this validator to its pool.
+func (p *ParamValidator) redeem() {
+ pools.poolOfParamValidators.RedeemValidator(p)
+}
+
+// redeemChildren returns any child validators that were not already
+// relinquished during Validate back to their pools.
+func (p *ParamValidator) redeemChildren() {
+ for idx, validator := range p.validators {
+ if validator == nil {
+ continue
+ }
+ if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
+ redeemableChildren.redeemChildren()
+ }
+ if redeemable, ok := validator.(interface{ redeem() }); ok {
+ redeemable.redeem()
+ }
+ p.validators[idx] = nil // free up allocated children if not in pool
+ }
+}
+
+// basicSliceValidator validates slice values for parameters, items and
+// headers: size bounds, uniqueness, and per-element items validation.
+type basicSliceValidator struct {
+ Path string
+ In string
+ Default interface{}
+ MaxItems *int64
+ MinItems *int64
+ UniqueItems bool
+ Items *spec.Items
+ Source interface{}
+ KnownFormats strfmt.Registry
+ Options *SchemaValidatorOptions
+}
+
+// newBasicSliceValidator builds a basicSliceValidator, borrowing it from a
+// pool when opts.recycleValidators is set.
+func newBasicSliceValidator(
+ path, in string,
+ def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items,
+ source interface{}, formats strfmt.Registry,
+ opts *SchemaValidatorOptions) *basicSliceValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *basicSliceValidator
+ if opts.recycleValidators {
+ s = pools.poolOfBasicSliceValidators.BorrowValidator()
+ } else {
+ s = new(basicSliceValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.MaxItems = maxItems
+ s.MinItems = minItems
+ s.UniqueItems = uniqueItems
+ s.Items = items
+ s.Source = source
+ s.KnownFormats = formats
+ s.Options = opts
+
+ return s
+}
+
+// SetPath rebases the path reported in validation errors.
+func (s *basicSliceValidator) SetPath(path string) {
+ s.Path = path
+}
+
+// Applies reports whether this validator is relevant: the source must be a
+// parameter, items or header spec, and the data must be a slice.
+func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ switch source.(type) {
+ case *spec.Parameter, *spec.Items, *spec.Header:
+ return kind == reflect.Slice
+ default:
+ return false
+ }
+}
+
+// Validate checks min/max items, uniqueness, then validates each element
+// against s.Items (if any). It returns on the first failing constraint;
+// a nil result means success. data is assumed to be a slice (see Applies).
+func (s *basicSliceValidator) Validate(data interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+ val := reflect.ValueOf(data)
+
+ size := int64(val.Len())
+ if s.MinItems != nil {
+ if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ if s.MaxItems != nil {
+ if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ if s.UniqueItems {
+ if err := UniqueItems(s.Path, s.In, data); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ // no per-element constraints: done
+ if s.Items == nil {
+ return nil
+ }
+
+ for i := 0; i < int(size); i++ {
+ itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options)
+ ele := val.Index(i)
+ if err := itemsValidator.Validate(i, ele.Interface()); err != nil {
+ if err.HasErrors() {
+ return err
+ }
+ // error-free pooled result: recycle it instead of merging
+ if err.wantsRedeemOnMerge {
+ pools.poolOfResults.RedeemResult(err)
+ }
+ }
+ }
+
+ return nil
+}
+
+// redeem returns this validator to its pool.
+func (s *basicSliceValidator) redeem() {
+ pools.poolOfBasicSliceValidators.RedeemValidator(s)
+}
+
+// numberValidator validates numeric values (integers and floats) against
+// multipleOf, maximum and minimum constraints.
+type numberValidator struct {
+ Path string
+ In string
+ Default interface{}
+ MultipleOf *float64
+ Maximum *float64
+ ExclusiveMaximum bool
+ Minimum *float64
+ ExclusiveMinimum bool
+ // Allows for more accurate behavior regarding integers
+ Type string
+ Format string
+ Options *SchemaValidatorOptions
+}
+
+// newNumberValidator builds a numberValidator, borrowing it from a pool
+// when opts.recycleValidators is set.
+func newNumberValidator(
+ path, in string, def interface{},
+ multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool,
+ typ, format string,
+ opts *SchemaValidatorOptions) *numberValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var n *numberValidator
+ if opts.recycleValidators {
+ n = pools.poolOfNumberValidators.BorrowValidator()
+ } else {
+ n = new(numberValidator)
+ }
+
+ n.Path = path
+ n.In = in
+ n.Default = def
+ n.MultipleOf = multipleOf
+ n.Maximum = maximum
+ n.ExclusiveMaximum = exclusiveMaximum
+ n.Minimum = minimum
+ n.ExclusiveMinimum = exclusiveMinimum
+ n.Type = typ
+ n.Format = format
+ n.Options = opts
+
+ return n
+}
+
+// SetPath rebases the path reported in validation errors.
+func (n *numberValidator) SetPath(path string) {
+ n.Path = path
+}
+
+// Applies reports whether this validator is relevant: the source must be a
+// parameter, schema, items or header spec, and the data kind must be an
+// integer (signed or unsigned) or a float.
+func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ switch source.(type) {
+ case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
+ isInt := kind >= reflect.Int && kind <= reflect.Uint64
+ isFloat := kind == reflect.Float32 || kind == reflect.Float64
+ return isInt || isFloat
+ default:
+ return false
+ }
+}
+
+// Validate provides a validator for generic JSON numbers,
+//
+// By default, numbers are internally represented as float64.
+// Formats float, or float32 may alter this behavior by mapping to float32.
+// A special validation process is followed for integers, with optional "format":
+// this is an attempt to provide a validation with native types.
+//
+// NOTE: since the constraint specified (boundary, multipleOf) is unmarshalled
+// as float64, loss of information remains possible (e.g. on very large integers).
+//
+// Since this value directly comes from the unmarshalling, it is not possible
+// at this stage of processing to check further and guarantee the correctness of such values.
+//
+// Normally, the JSON Number.MAX_SAFE_INTEGER (resp. Number.MIN_SAFE_INTEGER)
+// would check we do not get such a loss.
+//
+// If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings().
+//
+// TODO: consider replacing boundary check errors by simple warnings.
+//
+// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?)
+func (n *numberValidator) Validate(val interface{}) *Result {
+ if n.Options.recycleValidators {
+ defer func() {
+ n.redeem()
+ }()
+ }
+
+ var res, resMultiple, resMinimum, resMaximum *Result
+ if n.Options.recycleResult {
+ res = pools.poolOfResults.BorrowResult()
+ } else {
+ res = new(Result)
+ }
+
+ // Used only to attempt to validate constraint on value,
+ // even though value or constraint specified do not match type and format
+ data := valueHelp.asFloat64(val)
+
+ // Is the provided value within the range of the specified numeric type and format?
+ res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path))
+
+ if n.MultipleOf != nil {
+ resMultiple = pools.poolOfResults.BorrowResult()
+
+ // Is the constraint specifier within the range of the specific numeric type and format?
+ resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path))
+ if resMultiple.IsValid() {
+ // Constraint validated with compatible types
+ if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil {
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ } else {
+ // Constraint nevertheless validated, converted as general number
+ if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil {
+ resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ }
+ }
+
+ if n.Maximum != nil {
+ resMaximum = pools.poolOfResults.BorrowResult()
+
+ // Is the constraint specifier within the range of the specific numeric type and format?
+ resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path))
+ if resMaximum.IsValid() {
+ // Constraint validated with compatible types
+ if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil {
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ } else {
+ // Constraint nevertheless validated, converted as general number
+ if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil {
+ resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ }
+ }
+
+ if n.Minimum != nil {
+ resMinimum = pools.poolOfResults.BorrowResult()
+
+ // Is the constraint specifier within the range of the specific numeric type and format?
+ resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path))
+ if resMinimum.IsValid() {
+ // Constraint validated with compatible types
+ if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil {
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ } else {
+ // Constraint nevertheless validated, converted as general number
+ if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil {
+ resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult))
+ }
+ }
+ }
+ // Merge relinquishes pooled sub-results; Inc counts the validated item.
+ res.Merge(resMultiple, resMinimum, resMaximum)
+ res.Inc()
+
+ return res
+}
+
+// redeem returns this validator to its pool.
+func (n *numberValidator) redeem() {
+ pools.poolOfNumberValidators.RedeemValidator(n)
+}
+
+// stringValidator validates string values: requiredness, length bounds
+// and regular-expression pattern.
+type stringValidator struct {
+ Path string
+ In string
+ Default interface{}
+ Required bool
+ AllowEmptyValue bool
+ MaxLength *int64
+ MinLength *int64
+ Pattern string
+ Options *SchemaValidatorOptions
+}
+
+// newStringValidator builds a stringValidator, borrowing it from a pool
+// when opts.recycleValidators is set.
+func newStringValidator(
+ path, in string,
+ def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string,
+ opts *SchemaValidatorOptions) *stringValidator {
+ if opts == nil {
+ opts = new(SchemaValidatorOptions)
+ }
+
+ var s *stringValidator
+ if opts.recycleValidators {
+ s = pools.poolOfStringValidators.BorrowValidator()
+ } else {
+ s = new(stringValidator)
+ }
+
+ s.Path = path
+ s.In = in
+ s.Default = def
+ s.Required = required
+ s.AllowEmptyValue = allowEmpty
+ s.MaxLength = maxLength
+ s.MinLength = minLength
+ s.Pattern = pattern
+ s.Options = opts
+
+ return s
+}
+
+// SetPath rebases the path reported in validation errors.
+func (s *stringValidator) SetPath(path string) {
+ s.Path = path
+}
+
+// Applies reports whether this validator is relevant: the source must be a
+// parameter, schema, items or header spec, and the data must be a string.
+func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool {
+ switch source.(type) {
+ case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header:
+ return kind == reflect.String
+ default:
+ return false
+ }
+}
+
+// Validate checks the string value against requiredness, max/min length and
+// pattern, stopping at the first failing constraint. A nil result means
+// success.
+func (s *stringValidator) Validate(val interface{}) *Result {
+ if s.Options.recycleValidators {
+ defer func() {
+ s.redeem()
+ }()
+ }
+
+ data, ok := val.(string)
+ if !ok {
+ return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult)
+ }
+
+ // emptiness is only an error when the value is required, empties are
+ // disallowed, and no non-empty default would apply
+ if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") {
+ if err := RequiredString(s.Path, s.In, data); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ if s.MaxLength != nil {
+ if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ if s.MinLength != nil {
+ if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+
+ if s.Pattern != "" {
+ if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil {
+ return errorHelp.sErr(err, s.Options.recycleResult)
+ }
+ }
+ return nil
+}
+
+// redeem returns this validator to its pool.
+func (s *stringValidator) redeem() {
+ pools.poolOfStringValidators.RedeemValidator(s)
+}
diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go
new file mode 100644
index 00000000..5f6f5ee6
--- /dev/null
+++ b/vendor/github.com/go-openapi/validate/values.go
@@ -0,0 +1,450 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validate
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// Enum validates if the data is a member of the enum
+func Enum(path, in string, data interface{}, enum interface{}) *errors.Validation {
+ return EnumCase(path, in, data, enum, true)
+}
+
+// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings
+// When caseSensitive is false, string values are additionally compared with
+// strings.EqualFold. A nil return means the value is a member (or enum is
+// not a slice, in which case there is nothing to check).
+func EnumCase(path, in string, data interface{}, enum interface{}, caseSensitive bool) *errors.Validation {
+ val := reflect.ValueOf(enum)
+ if val.Kind() != reflect.Slice {
+ return nil
+ }
+
+ dataString := convertEnumCaseStringKind(data, caseSensitive)
+ var values []interface{}
+ for i := 0; i < val.Len(); i++ {
+ ele := val.Index(i)
+ enumValue := ele.Interface()
+ if data != nil {
+ // direct equality first
+ if reflect.DeepEqual(data, enumValue) {
+ return nil
+ }
+ // case-insensitive string comparison, when enabled
+ enumString := convertEnumCaseStringKind(enumValue, caseSensitive)
+ if dataString != nil && enumString != nil && strings.EqualFold(*dataString, *enumString) {
+ return nil
+ }
+ actualType := reflect.TypeOf(enumValue)
+ if actualType == nil { // Safeguard. Frankly, I don't know how we may get a nil
+ continue
+ }
+ expectedValue := reflect.ValueOf(data)
+ if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+ // Attempt comparison after type conversion
+ if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) {
+ return nil
+ }
+ }
+ }
+ // collect the allowed values for the error message
+ values = append(values, enumValue)
+ }
+ return errors.EnumFail(path, in, data, values)
+}
+
+// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set
+// It returns nil when case sensitivity applies or the value is not a string.
+func convertEnumCaseStringKind(value interface{}, caseSensitive bool) *string {
+ if caseSensitive {
+ return nil
+ }
+
+ val := reflect.ValueOf(value)
+ if val.Kind() != reflect.String {
+ return nil
+ }
+
+ str := fmt.Sprintf("%v", value)
+ return &str
+}
+
+// MinItems validates that there are at least n items in a slice
+// A nil return means the constraint holds.
+func MinItems(path, in string, size, min int64) *errors.Validation {
+ if size < min {
+ return errors.TooFewItems(path, in, min, size)
+ }
+ return nil
+}
+
+// MaxItems validates that there are at most n items in a slice
+// A nil return means the constraint holds.
+func MaxItems(path, in string, size, max int64) *errors.Validation {
+ if size > max {
+ return errors.TooManyItems(path, in, max, size)
+ }
+ return nil
+}
+
+// UniqueItems validates that the provided slice has unique elements
+// Comparison uses reflect.DeepEqual with a quadratic scan; non-slice data
+// passes trivially. A nil return means the constraint holds.
+func UniqueItems(path, in string, data interface{}) *errors.Validation {
+ val := reflect.ValueOf(data)
+ if val.Kind() != reflect.Slice {
+ return nil
+ }
+ var unique []interface{}
+ for i := 0; i < val.Len(); i++ {
+ v := val.Index(i).Interface()
+ for _, u := range unique {
+ if reflect.DeepEqual(v, u) {
+ return errors.DuplicateItems(path, in)
+ }
+ }
+ unique = append(unique, v)
+ }
+ return nil
+}
+
+// MinLength validates a string for minimum length
+// Length is counted in Unicode code points, not bytes.
+func MinLength(path, in, data string, minLength int64) *errors.Validation {
+ strLen := int64(utf8.RuneCountInString(data))
+ if strLen < minLength {
+ return errors.TooShort(path, in, minLength, data)
+ }
+ return nil
+}
+
+// MaxLength validates a string for maximum length
+// Length is counted in Unicode code points, not bytes.
+func MaxLength(path, in, data string, maxLength int64) *errors.Validation {
+ strLen := int64(utf8.RuneCountInString(data))
+ if strLen > maxLength {
+ return errors.TooLong(path, in, maxLength, data)
+ }
+ return nil
+}
+
+// ReadOnly validates an interface for readonly
+// The check is skipped unless the context marks a request operation;
+// a readOnly value may only appear in requests as its type's zero value.
+func ReadOnly(ctx context.Context, path, in string, data interface{}) *errors.Validation {
+
+ // read only is only validated when operationType is request
+ if op := extractOperationType(ctx); op != request {
+ return nil
+ }
+
+ // data must be of zero value of its type
+ val := reflect.ValueOf(data)
+ if val.IsValid() {
+ if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) {
+ return nil
+ }
+ } else {
+ // invalid reflect value (e.g. untyped nil): nothing to flag
+ return nil
+ }
+
+ return errors.ReadOnly(path, in, data)
+}
+
+// Required validates an interface for requiredness
+// A value equal to its type's zero value is considered missing.
+func Required(path, in string, data interface{}) *errors.Validation {
+ val := reflect.ValueOf(data)
+ if val.IsValid() {
+ if reflect.DeepEqual(reflect.Zero(val.Type()).Interface(), val.Interface()) {
+ return errors.Required(path, in, data)
+ }
+ return nil
+ }
+ return errors.Required(path, in, data)
+}
+
+// RequiredString validates a string for requiredness
+// The empty string is considered missing.
+func RequiredString(path, in, data string) *errors.Validation {
+ if data == "" {
+ return errors.Required(path, in, data)
+ }
+ return nil
+}
+
+// RequiredNumber validates a number for requiredness
+// Zero is considered missing.
+func RequiredNumber(path, in string, data float64) *errors.Validation {
+ if data == 0 {
+ return errors.Required(path, in, data)
+ }
+ return nil
+}
+
+// Pattern validates a string against a regular expression
+// An invalid pattern is reported as a pattern failure (with the compile
+// error embedded in the message) rather than panicking.
+func Pattern(path, in, data, pattern string) *errors.Validation {
+ re, err := compileRegexp(pattern)
+ if err != nil {
+ return errors.FailedPattern(path, in, fmt.Sprintf("%s, but pattern is invalid: %s", pattern, err.Error()), data)
+ }
+ if !re.MatchString(data) {
+ return errors.FailedPattern(path, in, pattern, data)
+ }
+ return nil
+}
+
+// MaximumInt validates that an integer is no larger than the given maximum
+// (strictly smaller when exclusive is true).
+func MaximumInt(path, in string, data, max int64, exclusive bool) *errors.Validation {
+ if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximumInt(path, in, max, exclusive, data)
+ }
+ return nil
+}
+
+// MaximumUint validates that an unsigned integer is no larger than the given
+// maximum (strictly smaller when exclusive is true).
+func MaximumUint(path, in string, data, max uint64, exclusive bool) *errors.Validation {
+ if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximumUint(path, in, max, exclusive, data)
+ }
+ return nil
+}
+
+// Maximum validates that a number is no larger than the given maximum
+// (strictly smaller when exclusive is true).
+func Maximum(path, in string, data, max float64, exclusive bool) *errors.Validation {
+ if (!exclusive && data > max) || (exclusive && data >= max) {
+ return errors.ExceedsMaximum(path, in, max, exclusive, data)
+ }
+ return nil
+}
+
+// Minimum validates that a number is no smaller than the given minimum
+// (strictly larger when exclusive is true).
+func Minimum(path, in string, data, min float64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimum(path, in, min, exclusive, data)
+ }
+ return nil
+}
+
+// MinimumInt validates that an integer is no smaller than the given minimum
+// (strictly larger when exclusive is true).
+func MinimumInt(path, in string, data, min int64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimumInt(path, in, min, exclusive, data)
+ }
+ return nil
+}
+
+// MinimumUint validates that an unsigned integer is no smaller than the given
+// minimum (strictly larger when exclusive is true).
+func MinimumUint(path, in string, data, min uint64, exclusive bool) *errors.Validation {
+ if (!exclusive && data < min) || (exclusive && data <= min) {
+ return errors.ExceedsMinimumUint(path, in, min, exclusive, data)
+ }
+ return nil
+}
+
+// MultipleOf validates if the provided number is a multiple of the factor
+func MultipleOf(path, in string, data, factor float64) *errors.Validation {
+ // multipleOf factor must be positive
+ if factor <= 0 {
+ return errors.MultipleOfMustBePositive(path, in, factor)
+ }
+ var mult float64
+ if factor < 1 {
+ mult = 1 / factor * data
+ } else {
+ mult = data / factor
+ }
+ if !swag.IsFloat64AJSONInteger(mult) {
+ return errors.NotMultipleOf(path, in, factor, data)
+ }
+ return nil
+}
+
+// MultipleOfInt validates if the provided integer is a multiple of the factor
+func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation {
+ // multipleOf factor must be positive
+ if factor <= 0 {
+ return errors.MultipleOfMustBePositive(path, in, factor)
+ }
+ mult := data / factor
+ if mult*factor != data {
+ return errors.NotMultipleOf(path, in, factor, data)
+ }
+ return nil
+}
+
+// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor
+func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation {
+ // multipleOf factor must be positive
+ if factor == 0 {
+ return errors.MultipleOfMustBePositive(path, in, factor)
+ }
+ mult := data / factor
+ if mult*factor != data {
+ return errors.NotMultipleOf(path, in, factor, data)
+ }
+ return nil
+}
+
+// FormatOf validates if a string matches a format in the format registry
+func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation {
+ if registry == nil {
+ registry = strfmt.Default
+ }
+ if ok := registry.ContainsName(format); !ok {
+ return errors.InvalidTypeName(format)
+ }
+ if ok := registry.Validates(format, data); !ok {
+ return errors.InvalidType(path, in, format, data)
+ }
+ return nil
+}
+
+// MaximumNativeType provides native type constraint validation as a facade
+// to various numeric types versions of Maximum constraint check.
+//
+// Assumes that any possible loss conversion during conversion has been
+// checked beforehand.
+//
+// NOTE: currently, the max value is marshalled as a float64, no matter what,
+// which means there may be a loss during conversions (e.g. for very large integers)
+//
+// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
+func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation {
+ kind := reflect.ValueOf(val).Type().Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ value := valueHelp.asInt64(val)
+ return MaximumInt(path, in, value, int64(max), exclusive)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ value := valueHelp.asUint64(val)
+ if max < 0 {
+ return errors.ExceedsMaximum(path, in, max, exclusive, val)
+ }
+ return MaximumUint(path, in, value, uint64(max), exclusive)
+ case reflect.Float32, reflect.Float64:
+ fallthrough
+ default:
+ value := valueHelp.asFloat64(val)
+ return Maximum(path, in, value, max, exclusive)
+ }
+}
+
+// MinimumNativeType provides native type constraint validation as a facade
+// to various numeric types versions of Minimum constraint check.
+//
+// Assumes that any possible loss conversion during conversion has been
+// checked beforehand.
+//
+// NOTE: currently, the min value is marshalled as a float64, no matter what,
+// which means there may be a loss during conversions (e.g. for very large integers)
+//
+// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
+func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation {
+ kind := reflect.ValueOf(val).Type().Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ value := valueHelp.asInt64(val)
+ return MinimumInt(path, in, value, int64(min), exclusive)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ value := valueHelp.asUint64(val)
+ if min < 0 {
+ return nil
+ }
+ return MinimumUint(path, in, value, uint64(min), exclusive)
+ case reflect.Float32, reflect.Float64:
+ fallthrough
+ default:
+ value := valueHelp.asFloat64(val)
+ return Minimum(path, in, value, min, exclusive)
+ }
+}
+
+// MultipleOfNativeType provides native type constraint validation as a facade
+// to various numeric types version of MultipleOf constraint check.
+//
+// Assumes that any possible loss conversion during conversion has been
+// checked beforehand.
+//
+// NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what,
+// which means there may be a loss during conversions (e.g. for very large integers)
+//
+// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free
+func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation {
+ kind := reflect.ValueOf(val).Type().Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ value := valueHelp.asInt64(val)
+ return MultipleOfInt(path, in, value, int64(multipleOf))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ value := valueHelp.asUint64(val)
+ return MultipleOfUint(path, in, value, uint64(multipleOf))
+ case reflect.Float32, reflect.Float64:
+ fallthrough
+ default:
+ value := valueHelp.asFloat64(val)
+ return MultipleOf(path, in, value, multipleOf)
+ }
+}
+
+// IsValueValidAgainstRange checks that a numeric value is compatible with
+// the range defined by Type and Format, that is, may be converted without loss.
+//
+// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L
+func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path string) error {
+ kind := reflect.ValueOf(val).Type().Kind()
+
+ // What is the string representation of val
+ var stringRep string
+ switch kind { //nolint:exhaustive
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ stringRep = swag.FormatUint64(valueHelp.asUint64(val))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ stringRep = swag.FormatInt64(valueHelp.asInt64(val))
+ case reflect.Float32, reflect.Float64:
+ stringRep = swag.FormatFloat64(valueHelp.asFloat64(val))
+ default:
+ return fmt.Errorf("%s value number range checking called with invalid (non numeric) val type in %s", prefix, path)
+ }
+
+ var errVal error
+
+ switch typeName {
+ case integerType:
+ switch format {
+ case integerFormatInt32:
+ _, errVal = swag.ConvertInt32(stringRep)
+ case integerFormatUInt32:
+ _, errVal = swag.ConvertUint32(stringRep)
+ case integerFormatUInt64:
+ _, errVal = swag.ConvertUint64(stringRep)
+ case integerFormatInt64:
+ fallthrough
+ default:
+ _, errVal = swag.ConvertInt64(stringRep)
+ }
+ case numberType:
+ fallthrough
+ default:
+ switch format {
+ case numberFormatFloat, numberFormatFloat32:
+ _, errVal = swag.ConvertFloat32(stringRep)
+ case numberFormatDouble, numberFormatFloat64:
+ fallthrough
+ default:
+ // No check can be performed here since
+ // no number beyond float64 is supported
+ }
+ }
+ if errVal != nil { // We don't report the actual errVal from strconv
+ if format != "" {
+ errVal = fmt.Errorf("%s value must be of type %s with format %s in %s", prefix, typeName, format, path)
+ } else {
+ errVal = fmt.Errorf("%s value must be of type %s (default format) in %s", prefix, typeName, path)
+ }
+ }
+ return errVal
+}
diff --git a/vendor/github.com/go-resty/resty/v2/.gitignore b/vendor/github.com/go-resty/resty/v2/.gitignore
deleted file mode 100644
index 9e856bd4..00000000
--- a/vendor/github.com/go-resty/resty/v2/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-coverage.out
-coverage.txt
-
-# Exclude intellij IDE folders
-.idea/*
diff --git a/vendor/github.com/go-resty/resty/v2/BUILD.bazel b/vendor/github.com/go-resty/resty/v2/BUILD.bazel
deleted file mode 100644
index 03bb44c3..00000000
--- a/vendor/github.com/go-resty/resty/v2/BUILD.bazel
+++ /dev/null
@@ -1,48 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-load("@bazel_gazelle//:def.bzl", "gazelle")
-
-# gazelle:prefix github.com/go-resty/resty/v2
-# gazelle:go_naming_convention import_alias
-gazelle(name = "gazelle")
-
-go_library(
- name = "resty",
- srcs = [
- "client.go",
- "middleware.go",
- "redirect.go",
- "request.go",
- "response.go",
- "resty.go",
- "retry.go",
- "trace.go",
- "transport.go",
- "transport112.go",
- "util.go",
- ],
- importpath = "github.com/go-resty/resty/v2",
- visibility = ["//visibility:public"],
- deps = ["@org_golang_x_net//publicsuffix:go_default_library"],
-)
-
-go_test(
- name = "resty_test",
- srcs = [
- "client_test.go",
- "context_test.go",
- "example_test.go",
- "request_test.go",
- "resty_test.go",
- "retry_test.go",
- "util_test.go",
- ],
- data = glob([".testdata/*"]),
- embed = [":resty"],
- deps = ["@org_golang_x_net//proxy:go_default_library"],
-)
-
-alias(
- name = "go_default_library",
- actual = ":resty",
- visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/go-resty/resty/v2/LICENSE b/vendor/github.com/go-resty/resty/v2/LICENSE
deleted file mode 100644
index 27326a65..00000000
--- a/vendor/github.com/go-resty/resty/v2/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015-2021 Jeevanandam M., https://myjeeva.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md
deleted file mode 100644
index 8ec65182..00000000
--- a/vendor/github.com/go-resty/resty/v2/README.md
+++ /dev/null
@@ -1,906 +0,0 @@
-
-
Resty
-Simple HTTP and REST client library for Go (inspired by Ruby rest-client)
-Features section describes in detail about Resty capabilities
-
-
-
-
-
-
Resty Communication Channels
-
-
-
-## News
-
- * v2.7.0 [released](https://github.com/go-resty/resty/releases/tag/v2.7.0) and tagged on Nov 03, 2021.
- * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
- * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
- * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
-
-## Features
-
- * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
- * Simple and chainable methods for settings and request
- * [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
- * Auto detects `Content-Type`
- * Buffer less processing for `io.Reader`
- * Native `*http.Request` instance may be accessed during middleware and request execution via `Request.RawRequest`
- * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
- * [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Response) object gives you more possibility
- * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
- * Know your `response.Time()` and when we `response.ReceivedAt()`
- * Automatic marshal and unmarshal for `JSON` and `XML` content type
- * Default is `JSON`, if you supply `struct/map` without header `Content-Type`
- * For auto-unmarshal, refer to -
- - Success scenario [Request.SetResult()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetResult) and [Response.Result()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Result).
- - Error scenario [Request.SetError()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetError) and [Response.Error()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Error).
- - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
- * Resty provides an option to override [JSON Marshal/Unmarshal and XML Marshal/Unmarshal](#override-json--xml-marshalunmarshal)
- * Easy to upload one or more file(s) via `multipart/form-data`
- * Auto detects file content type
- * Request URL [Path Params (aka URI Params)](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetPathParams)
- * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
- * Resty client HTTP & REST [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnBeforeRequest) and [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnAfterResponse) middlewares
- * `Request.SetContext` supported
- * Authorization option of `BasicAuth` and `Bearer` token
- * Set request `ContentLength` value for all request or particular request
- * Custom [Root Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetRootCertificate) and Client [Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetCertificates)
- * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetOutputDirectory) & [SetOutput](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetOutput).
- * Cookies for your request and CookieJar support
- * SRV Record based request instead of Host URL
- * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
- * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetAllowGetMethodPayload)
- * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
- * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
- * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
- * Resty design
- * Have client level settings & options and also override at Request level if you want to
- * Request and Response middleware
- * Create Multiple clients if you want to `resty.New()`
- * Supports `http.RoundTripper` implementation, see [SetTransport](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetTransport)
- * goroutine concurrent safe
- * Resty Client trace, see [Client.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.EnableTrace) and [Request.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.EnableTrace)
- * Since v2.4.0, trace info contains a `RequestAttempt` value, and the `Request` object contains an `Attempt` attribute
- * Debug mode - clean and informative logging presentation
- * Gzip - Go does it automatically also resty has fallback handling too
- * Works fine with `HTTP/2` and `HTTP/1.1`
- * [Bazel support](#bazel-support)
- * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
- * Well tested client library
-
-### Included Batteries
-
- * Redirect Policies - see [how to use](#redirect-policy)
- * NoRedirectPolicy
- * FlexibleRedirectPolicy
- * DomainCheckRedirectPolicy
- * etc. [more info](redirect.go)
- * Retry Mechanism [how to use](#retries)
- * Backoff Retry
- * Conditional Retry
- * Since v2.6.0, Retry Hooks - [Client](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.AddRetryHook), [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.AddRetryHook)
- * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
- * etc (upcoming - throw your idea's [here](https://github.com/go-resty/resty/issues)).
-
-
-#### Supported Go Versions
-
-Initially Resty started supporting `go modules` since `v1.10.0` release.
-
-Starting Resty v2 and higher versions, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) package release. It requires a Go version capable of understanding `/vN` suffixed imports:
-
-- 1.9.7+
-- 1.10.3+
-- 1.11+
-
-
-## It might be beneficial for your project :smile:
-
-Resty author also published following projects for Go Community.
-
- * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
- * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
- * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
-
-
-## Installation
-
-```bash
-# Go Modules
-require github.com/go-resty/resty/v2 v2.7.0
-```
-
-## Usage
-
-The following samples will assist you to become as comfortable as possible with resty library.
-
-```go
-// Import resty into your code and refer it as `resty`.
-import "github.com/go-resty/resty/v2"
-```
-
-#### Simple GET
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-resp, err := client.R().
- EnableTrace().
- Get("https://httpbin.org/get")
-
-// Explore response object
-fmt.Println("Response Info:")
-fmt.Println(" Error :", err)
-fmt.Println(" Status Code:", resp.StatusCode())
-fmt.Println(" Status :", resp.Status())
-fmt.Println(" Proto :", resp.Proto())
-fmt.Println(" Time :", resp.Time())
-fmt.Println(" Received At:", resp.ReceivedAt())
-fmt.Println(" Body :\n", resp)
-fmt.Println()
-
-// Explore trace info
-fmt.Println("Request Trace Info:")
-ti := resp.Request.TraceInfo()
-fmt.Println(" DNSLookup :", ti.DNSLookup)
-fmt.Println(" ConnTime :", ti.ConnTime)
-fmt.Println(" TCPConnTime :", ti.TCPConnTime)
-fmt.Println(" TLSHandshake :", ti.TLSHandshake)
-fmt.Println(" ServerTime :", ti.ServerTime)
-fmt.Println(" ResponseTime :", ti.ResponseTime)
-fmt.Println(" TotalTime :", ti.TotalTime)
-fmt.Println(" IsConnReused :", ti.IsConnReused)
-fmt.Println(" IsConnWasIdle :", ti.IsConnWasIdle)
-fmt.Println(" ConnIdleTime :", ti.ConnIdleTime)
-fmt.Println(" RequestAttempt:", ti.RequestAttempt)
-fmt.Println(" RemoteAddr :", ti.RemoteAddr.String())
-
-/* Output
-Response Info:
- Error :
- Status Code: 200
- Status : 200 OK
- Proto : HTTP/2.0
- Time : 457.034718ms
- Received At: 2020-09-14 15:35:29.784681 -0700 PDT m=+0.458137045
- Body :
- {
- "args": {},
- "headers": {
- "Accept-Encoding": "gzip",
- "Host": "httpbin.org",
- "User-Agent": "go-resty/2.4.0 (https://github.com/go-resty/resty)",
- "X-Amzn-Trace-Id": "Root=1-5f5ff031-000ff6292204aa6898e4de49"
- },
- "origin": "0.0.0.0",
- "url": "https://httpbin.org/get"
- }
-
-Request Trace Info:
- DNSLookup : 4.074657ms
- ConnTime : 381.709936ms
- TCPConnTime : 77.428048ms
- TLSHandshake : 299.623597ms
- ServerTime : 75.414703ms
- ResponseTime : 79.337µs
- TotalTime : 457.034718ms
- IsConnReused : false
- IsConnWasIdle : false
- ConnIdleTime : 0s
- RequestAttempt: 1
- RemoteAddr : 3.221.81.55:443
-*/
-```
-
-#### Enhanced GET
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-resp, err := client.R().
- SetQueryParams(map[string]string{
- "page_no": "1",
- "limit": "20",
- "sort":"name",
- "order": "asc",
- "random":strconv.FormatInt(time.Now().Unix(), 10),
- }).
- SetHeader("Accept", "application/json").
- SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
- Get("/search_result")
-
-
-// Sample of using Request.SetQueryString method
-resp, err := client.R().
- SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
- SetHeader("Accept", "application/json").
- SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
- Get("/show_product")
-
-
-// If necessary, you can force response content type to tell Resty to parse a JSON response into your struct
-resp, err := client.R().
- SetResult(result).
- ForceContentType("application/json").
- Get("v2/alpine/manifests/latest")
-```
-
-#### Various POST method combinations
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// POST JSON string
-// No need to set content type, if you have client level setting
-resp, err := client.R().
- SetHeader("Content-Type", "application/json").
- SetBody(`{"username":"testuser", "password":"testpass"}`).
- SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
- Post("https://myapp.com/login")
-
-// POST []byte array
-// No need to set content type, if you have client level setting
-resp, err := client.R().
- SetHeader("Content-Type", "application/json").
- SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
- SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
- Post("https://myapp.com/login")
-
-// POST Struct, default is JSON content type. No need to set one
-resp, err := client.R().
- SetBody(User{Username: "testuser", Password: "testpass"}).
- SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
- SetError(&AuthError{}). // or SetError(AuthError{}).
- Post("https://myapp.com/login")
-
-// POST Map, default is JSON content type. No need to set one
-resp, err := client.R().
- SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
- SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
- SetError(&AuthError{}). // or SetError(AuthError{}).
- Post("https://myapp.com/login")
-
-// POST of raw bytes for file upload. For example: upload file to Dropbox
-fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf")
-
-// See we are not setting content-type header, since go-resty automatically detects Content-Type for you
-resp, err := client.R().
- SetBody(fileBytes).
- SetContentLength(true). // Dropbox expects this value
- SetAuthToken("").
- SetError(&DropboxError{}). // or SetError(DropboxError{}).
- Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
-
-// Note: resty detects Content-Type for request body/payload if content type header is not set.
-// * For struct and map data type defaults to 'application/json'
-// * Fallback is plain text content type
-```
-
-#### Sample PUT
-
-You can use various combinations of `PUT` method call like demonstrated for `POST`.
-
-```go
-// Note: This is one sample of PUT method usage, refer POST for more combination
-
-// Create a Resty Client
-client := resty.New()
-
-// Request goes as JSON content type
-// No need to set auth token, error, if you have client level settings
-resp, err := client.R().
- SetBody(Article{
- Title: "go-resty",
- Content: "This is my article content, oh ya!",
- Author: "Jeevanandam M",
- Tags: []string{"article", "sample", "resty"},
- }).
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- SetError(&Error{}). // or SetError(Error{}).
- Put("https://myapp.com/article/1234")
-```
-
-#### Sample PATCH
-
-You can use various combinations of `PATCH` method call like demonstrated for `POST`.
-
-```go
-// Note: This is one sample of PUT method usage, refer POST for more combination
-
-// Create a Resty Client
-client := resty.New()
-
-// Request goes as JSON content type
-// No need to set auth token, error, if you have client level settings
-resp, err := client.R().
- SetBody(Article{
- Tags: []string{"new tag1", "new tag2"},
- }).
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- SetError(&Error{}). // or SetError(Error{}).
- Patch("https://myapp.com/articles/1234")
-```
-
-#### Sample DELETE, HEAD, OPTIONS
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// DELETE a article
-// No need to set auth token, error, if you have client level settings
-resp, err := client.R().
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- SetError(&Error{}). // or SetError(Error{}).
- Delete("https://myapp.com/articles/1234")
-
-// DELETE a articles with payload/body as a JSON string
-// No need to set auth token, error, if you have client level settings
-resp, err := client.R().
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- SetError(&Error{}). // or SetError(Error{}).
- SetHeader("Content-Type", "application/json").
- SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`).
- Delete("https://myapp.com/articles")
-
-// HEAD of resource
-// No need to set auth token, if you have client level settings
-resp, err := client.R().
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- Head("https://myapp.com/videos/hi-res-video")
-
-// OPTIONS of resource
-// No need to set auth token, if you have client level settings
-resp, err := client.R().
- SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
- Options("https://myapp.com/servers/nyc-dc-01")
-```
-
-#### Override JSON & XML Marshal/Unmarshal
-
-User could register choice of JSON/XML library into resty or write your own. By default resty registers standard `encoding/json` and `encoding/xml` respectively.
-```go
-// Example of registering json-iterator
-import jsoniter "github.com/json-iterator/go"
-
-json := jsoniter.ConfigCompatibleWithStandardLibrary
-
-client := resty.New()
-client.JSONMarshal = json.Marshal
-client.JSONUnmarshal = json.Unmarshal
-
-// similarly user could do for XML too with -
-client.XMLMarshal
-client.XMLUnmarshal
-```
-
-### Multipart File(s) upload
-
-#### Using io.Reader
-
-```go
-profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
-notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
-
-// Create a Resty Client
-client := resty.New()
-
-resp, err := client.R().
- SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
- SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
- SetFormData(map[string]string{
- "first_name": "Jeevanandam",
- "last_name": "M",
- }).
- Post("http://myapp.com/upload")
-```
-
-#### Using File directly from Path
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Single file scenario
-resp, err := client.R().
- SetFile("profile_img", "/Users/jeeva/test-img.png").
- Post("http://myapp.com/upload")
-
-// Multiple files scenario
-resp, err := client.R().
- SetFiles(map[string]string{
- "profile_img": "/Users/jeeva/test-img.png",
- "notes": "/Users/jeeva/text-file.txt",
- }).
- Post("http://myapp.com/upload")
-
-// Multipart of form fields and files
-resp, err := client.R().
- SetFiles(map[string]string{
- "profile_img": "/Users/jeeva/test-img.png",
- "notes": "/Users/jeeva/text-file.txt",
- }).
- SetFormData(map[string]string{
- "first_name": "Jeevanandam",
- "last_name": "M",
- "zip_code": "00001",
- "city": "my city",
- "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
- }).
- Post("http://myapp.com/profile")
-```
-
-#### Sample Form submission
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// just mentioning about POST as an example with simple flow
-// User Login
-resp, err := client.R().
- SetFormData(map[string]string{
- "username": "jeeva",
- "password": "mypass",
- }).
- Post("http://myapp.com/login")
-
-// Followed by profile update
-resp, err := client.R().
- SetFormData(map[string]string{
- "first_name": "Jeevanandam",
- "last_name": "M",
- "zip_code": "00001",
- "city": "new city update",
- }).
- Post("http://myapp.com/profile")
-
-// Multi value form data
-criteria := url.Values{
- "search_criteria": []string{"book", "glass", "pencil"},
-}
-resp, err := client.R().
- SetFormDataFromValues(criteria).
- Post("http://myapp.com/search")
-```
-
-#### Save HTTP Response into File
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Setting output directory path, If directory not exists then resty creates one!
-// This is optional one, if you're planning using absoule path in
-// `Request.SetOutput` and can used together.
-client.SetOutputDirectory("/Users/jeeva/Downloads")
-
-// HTTP response gets saved into file, similar to curl -o flag
-_, err := client.R().
- SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
- Get("http://bit.ly/1LouEKr")
-
-// OR using absolute path
-// Note: output directory path is not used for absolute path
-_, err := client.R().
- SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
- Get("http://bit.ly/1LouEKr")
-```
-
-#### Request URL Path Params
-
-Resty provides easy to use dynamic request URL path params. Params can be set at client and request level. Client level params value can be overridden at request level.
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-client.R().SetPathParams(map[string]string{
- "userId": "sample@sample.com",
- "subAccountId": "100002",
-}).
-Get("/v1/users/{userId}/{subAccountId}/details")
-
-// Result:
-// Composed URL - /v1/users/sample@sample.com/100002/details
-```
-
-#### Request and Response Middleware
-
-Resty provides middleware ability to manipulate for Request and Response. It is more flexible than callback approach.
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Registering Request Middleware
-client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
- // Now you have access to Client and current Request object
- // manipulate it as per your need
-
- return nil // if its success otherwise return error
- })
-
-// Registering Response Middleware
-client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
- // Now you have access to Client and current Response object
- // manipulate it as per your need
-
- return nil // if its success otherwise return error
- })
-```
-
-#### OnError Hooks
-
-Resty provides OnError hooks that may be called because:
-
-- The client failed to send the request due to connection timeout, TLS handshake failure, etc...
-- The request was retried the maximum amount of times, and still failed.
-
-If there was a response from the server, the original error will be wrapped in `*resty.ResponseError` which contains the last response received.
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-client.OnError(func(req *resty.Request, err error) {
- if v, ok := err.(*resty.ResponseError); ok {
- // v.Response contains the last response from the server
- // v.Err contains the original error
- }
- // Log the error, increment a metric, etc...
-})
-```
-
-#### Redirect Policy
-
-Resty provides few ready to use redirect policy(s) also it supports multiple policies together.
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Assign Client Redirect Policy. Create one as per you need
-client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
-
-// Wanna multiple policies such as redirect count, domain name check, etc
-client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
- resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
-```
-
-##### Custom Redirect Policy
-
-Implement [RedirectPolicy](redirect.go#L20) interface and register it with resty client. Have a look [redirect.go](redirect.go) for more information.
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Using raw func into resty.SetRedirectPolicy
-client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
- // Implement your logic here
-
- // return nil for continue redirect otherwise return error to stop/prevent redirect
- return nil
-}))
-
-//---------------------------------------------------
-
-// Using struct create more flexible redirect policy
-type CustomRedirectPolicy struct {
- // variables goes here
-}
-
-func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
- // Implement your logic here
-
- // return nil for continue redirect otherwise return error to stop/prevent redirect
- return nil
-}
-
-// Registering in resty
-client.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */})
-```
-
-#### Custom Root Certificates and Client Certificates
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Custom Root certificates, just supply .pem file.
-// you can add one or more root certificates, its get appended
-client.SetRootCertificate("/path/to/root/pemFile1.pem")
-client.SetRootCertificate("/path/to/root/pemFile2.pem")
-// ... and so on!
-
-// Adding Client Certificates, you add one or more certificates
-// Sample for creating certificate object
-// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
-cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
-if err != nil {
- log.Fatalf("ERROR client certificate: %s", err)
-}
-// ...
-
-// You add one or more certificates
-client.SetCertificates(cert1, cert2, cert3)
-```
-
-#### Custom Root Certificates and Client Certificates from string
-
-```go
-// Custom Root certificates from string
-// You can pass you certificates throught env variables as strings
-// you can add one or more root certificates, its get appended
-client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
-client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
-// ... and so on!
-
-// Adding Client Certificates, you add one or more certificates
-// Sample for creating certificate object
-// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
-cert1, err := tls.X509KeyPair([]byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"), []byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"))
-if err != nil {
- log.Fatalf("ERROR client certificate: %s", err)
-}
-// ...
-
-// You add one or more certificates
-client.SetCertificates(cert1, cert2, cert3)
-```
-
-#### Proxy Settings - Client as well as at Request Level
-
-Default `Go` supports Proxy via environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
-Choose as per your need.
-
-**Client Level Proxy** settings applied to all the request
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Setting a Proxy URL and Port
-client.SetProxy("http://proxyserver:8888")
-
-// Want to remove proxy setting
-client.RemoveProxy()
-```
-
-#### Retries
-
-Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
-to increase retry intervals after each attempt.
-
-Usage example:
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Retries are configured per client
-client.
- // Set retry count to non zero to enable retries
- SetRetryCount(3).
- // You can override initial retry wait time.
- // Default is 100 milliseconds.
- SetRetryWaitTime(5 * time.Second).
- // MaxWaitTime can be overridden as well.
- // Default is 2 seconds.
- SetRetryMaxWaitTime(20 * time.Second).
- // SetRetryAfter sets callback to calculate wait time between retries.
- // Default (nil) implies exponential backoff with jitter
- SetRetryAfter(func(client *resty.Client, resp *resty.Response) (time.Duration, error) {
- return 0, errors.New("quota exceeded")
- })
-```
-
-Above setup will result in resty retrying requests returned non nil error up to
-3 times with delay increased after each attempt.
-
-You can optionally provide client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-client.AddRetryCondition(
- // RetryConditionFunc type is for retry condition function
- // input: non-nil Response OR request execution error
- func(r *resty.Response, err error) bool {
- return r.StatusCode() == http.StatusTooManyRequests
- },
-)
-```
-
-Above example will make resty retry requests ended with `429 Too Many Requests`
-status code.
-
-Multiple retry conditions can be added.
-
-It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
-implemented. [Reference](retry_test.go).
-
-#### Allow GET request with Payload
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Allow GET request with Payload. This is disabled by default.
-client.SetAllowGetMethodPayload(true)
-```
-
-#### Wanna Multiple Clients
-
-```go
-// Here you go!
-// Client 1
-client1 := resty.New()
-client1.R().Get("http://httpbin.org")
-// ...
-
-// Client 2
-client2 := resty.New()
-client2.R().Head("http://httpbin.org")
-// ...
-
-// Bend it as per your need!!!
-```
-
-#### Remaining Client Settings & its Options
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Unique settings at Client level
-//--------------------------------
-// Enable debug mode
-client.SetDebug(true)
-
-// Assign Client TLSClientConfig
-// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
-client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
-
-// or One can disable security check (https)
-client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
-
-// Set client timeout as per your need
-client.SetTimeout(1 * time.Minute)
-
-
-// You can override all below settings and options at request level if you want to
-//--------------------------------------------------------------------------------
-// Host URL for all request. So you can use relative URL in the request
-client.SetHostURL("http://httpbin.org")
-
-// Headers for all request
-client.SetHeader("Accept", "application/json")
-client.SetHeaders(map[string]string{
- "Content-Type": "application/json",
- "User-Agent": "My custom User Agent String",
- })
-
-// Cookies for all request
-client.SetCookie(&http.Cookie{
- Name:"go-resty",
- Value:"This is cookie value",
- Path: "/",
- Domain: "sample.com",
- MaxAge: 36000,
- HttpOnly: true,
- Secure: false,
- })
-client.SetCookies(cookies)
-
-// URL query parameters for all request
-client.SetQueryParam("user_id", "00001")
-client.SetQueryParams(map[string]string{ // sample of those who use this manner
- "api_key": "api-key-here",
- "api_secert": "api-secert",
- })
-client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
-
-// Form data for all request. Typically used with POST and PUT
-client.SetFormData(map[string]string{
- "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
- })
-
-// Basic Auth for all request
-client.SetBasicAuth("myuser", "mypass")
-
-// Bearer Auth Token for all request
-client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
-
-// Enabling Content length value for all request
-client.SetContentLength(true)
-
-// Registering global Error object structure for JSON/XML request
-client.SetError(&Error{}) // or resty.SetError(Error{})
-```
-
-#### Unix Socket
-
-```go
-unixSocket := "/var/run/my_socket.sock"
-
-// Create a Go's http.Transport so we can set it in resty.
-transport := http.Transport{
- Dial: func(_, _ string) (net.Conn, error) {
- return net.Dial("unix", unixSocket)
- },
-}
-
-// Create a Resty Client
-client := resty.New()
-
-// Set the previous transport that we created, set the scheme of the communication to the
-// socket and set the unixSocket as the HostURL.
-client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
-
-// No need to write the host's URL on the request, just the path.
-client.R().Get("/index.html")
-```
-
-#### Bazel Support
-
-Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
-For example, to run all tests:
-
-```shell
-bazel test :resty_test
-```
-
-#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
-
-In order to mock the http requests when testing your application you
-could use the `httpmock` library.
-
-When using the default resty client, you should pass the client to the library as follow:
-
-```go
-// Create a Resty Client
-client := resty.New()
-
-// Get the underlying HTTP Client and set it to Mock
-httpmock.ActivateNonDefault(client.GetClient())
-```
-
-More detailed example of mocking resty http requests using ginko could be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
-
-## Versioning
-
-Resty releases versions according to [Semantic Versioning](http://semver.org)
-
- * Resty v2 does not use `gopkg.in` service for library versioning.
- * Resty fully adapted to `go mod` capabilities since `v1.10.0` release.
- * Resty v1 series was using `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to appropriate tagged versions; `X` denotes version series number and it's a stable release for production use. For e.g. `gopkg.in/resty.v0`.
- * Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break API's. I aim to maintain backwards compatibility, but sometimes API's and behavior might be changed to fix a bug.
-
-## Contribution
-
-I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request, I like pull requests that include test cases for fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests.
-
-BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
-
-## Creator
-
-[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
-
-## Core Team
-
-Have a look on [Members](https://github.com/orgs/go-resty/people) page.
-
-## Contributors
-
-Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
-
-## License
-
-Resty released under MIT license, refer [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/v2/WORKSPACE b/vendor/github.com/go-resty/resty/v2/WORKSPACE
deleted file mode 100644
index 9ef03e95..00000000
--- a/vendor/github.com/go-resty/resty/v2/WORKSPACE
+++ /dev/null
@@ -1,31 +0,0 @@
-workspace(name = "resty")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-http_archive(
- name = "io_bazel_rules_go",
- sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
- "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
- ],
-)
-
-http_archive(
- name = "bazel_gazelle",
- sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
- urls = [
- "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
- "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
- ],
-)
-
-load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
-
-go_rules_dependencies()
-
-go_register_toolchains(version = "1.16")
-
-load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
-
-gazelle_dependencies()
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
deleted file mode 100644
index 1a03efa3..00000000
--- a/vendor/github.com/go-resty/resty/v2/client.go
+++ /dev/null
@@ -1,1115 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "net/http"
- "net/url"
- "reflect"
- "regexp"
- "strings"
- "sync"
- "time"
-)
-
-const (
- // MethodGet HTTP method
- MethodGet = "GET"
-
- // MethodPost HTTP method
- MethodPost = "POST"
-
- // MethodPut HTTP method
- MethodPut = "PUT"
-
- // MethodDelete HTTP method
- MethodDelete = "DELETE"
-
- // MethodPatch HTTP method
- MethodPatch = "PATCH"
-
- // MethodHead HTTP method
- MethodHead = "HEAD"
-
- // MethodOptions HTTP method
- MethodOptions = "OPTIONS"
-)
-
-var (
- hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
- hdrAcceptKey = http.CanonicalHeaderKey("Accept")
- hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
- hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
- hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
- hdrLocationKey = http.CanonicalHeaderKey("Location")
-
- plainTextType = "text/plain; charset=utf-8"
- jsonContentType = "application/json"
- formContentType = "application/x-www-form-urlencoded"
-
- jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`)
- xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`)
-
- hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
- bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
-)
-
-type (
- // RequestMiddleware type is for request middleware, called before a request is sent
- RequestMiddleware func(*Client, *Request) error
-
- // ResponseMiddleware type is for response middleware, called after a response has been received
- ResponseMiddleware func(*Client, *Response) error
-
- // PreRequestHook type is for the request hook, called right before the request is sent
- PreRequestHook func(*Client, *http.Request) error
-
- // RequestLogCallback type is for request logs, called before the request is logged
- RequestLogCallback func(*RequestLog) error
-
- // ResponseLogCallback type is for response logs, called before the response is logged
- ResponseLogCallback func(*ResponseLog) error
-
- // ErrorHook type is for reacting to request errors, called after all retries were attempted
- ErrorHook func(*Request, error)
-)
-
-// Client struct is used to create Resty client with client level settings,
-// these settings are applicable to all the request raised from the client.
-//
-// Resty also provides an options to override most of the client settings
-// at request level.
-type Client struct {
- BaseURL string
- HostURL string // Deprecated: use BaseURL instead. To be removed in v3.0.0 release.
- QueryParam url.Values
- FormData url.Values
- PathParams map[string]string
- Header http.Header
- UserInfo *User
- Token string
- AuthScheme string
- Cookies []*http.Cookie
- Error reflect.Type
- Debug bool
- DisableWarn bool
- AllowGetMethodPayload bool
- RetryCount int
- RetryWaitTime time.Duration
- RetryMaxWaitTime time.Duration
- RetryConditions []RetryConditionFunc
- RetryHooks []OnRetryFunc
- RetryAfter RetryAfterFunc
- JSONMarshal func(v interface{}) ([]byte, error)
- JSONUnmarshal func(data []byte, v interface{}) error
- XMLMarshal func(v interface{}) ([]byte, error)
- XMLUnmarshal func(data []byte, v interface{}) error
-
- // HeaderAuthorizationKey is used to set/access Request Authorization header
- // value when `SetAuthToken` option is used.
- HeaderAuthorizationKey string
-
- jsonEscapeHTML bool
- setContentLength bool
- closeConnection bool
- notParseResponse bool
- trace bool
- debugBodySizeLimit int64
- outputDirectory string
- scheme string
- log Logger
- httpClient *http.Client
- proxyURL *url.URL
- beforeRequest []RequestMiddleware
- udBeforeRequest []RequestMiddleware
- preReqHook PreRequestHook
- afterResponse []ResponseMiddleware
- requestLog RequestLogCallback
- responseLog ResponseLogCallback
- errorHooks []ErrorHook
-}
-
-// User type is to hold an username and password information
-type User struct {
- Username, Password string
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Client methods
-//___________________________________
-
-// SetHostURL method is to set Host URL in the client instance. It will be used with request
-// raised from this client with relative URL
-// // Setting HTTP address
-// client.SetHostURL("http://myjeeva.com")
-//
-// // Setting HTTPS address
-// client.SetHostURL("https://myjeeva.com")
-//
-// Deprecated: use SetBaseURL instead. To be removed in v3.0.0 release.
-func (c *Client) SetHostURL(url string) *Client {
- c.SetBaseURL(url)
- return c
-}
-
-// SetBaseURL method is to set Base URL in the client instance. It will be used with request
-// raised from this client with relative URL
-// // Setting HTTP address
-// client.SetBaseURL("http://myjeeva.com")
-//
-// // Setting HTTPS address
-// client.SetBaseURL("https://myjeeva.com")
-//
-// Since v2.7.0
-func (c *Client) SetBaseURL(url string) *Client {
- c.BaseURL = strings.TrimRight(url, "/")
- c.HostURL = c.BaseURL
- return c
-}
-
-// SetHeader method sets a single header field and its value in the client instance.
-// These headers will be applied to all requests raised from this client instance.
-// Also it can be overridden at request level header options.
-//
-// See `Request.SetHeader` or `Request.SetHeaders`.
-//
-// For Example: To set `Content-Type` and `Accept` as `application/json`
-//
-// client.
-// SetHeader("Content-Type", "application/json").
-// SetHeader("Accept", "application/json")
-func (c *Client) SetHeader(header, value string) *Client {
- c.Header.Set(header, value)
- return c
-}
-
-// SetHeaders method sets multiple headers field and its values at one go in the client instance.
-// These headers will be applied to all requests raised from this client instance. Also it can be
-// overridden at request level headers options.
-//
-// See `Request.SetHeaders` or `Request.SetHeader`.
-//
-// For Example: To set `Content-Type` and `Accept` as `application/json`
-//
-// client.SetHeaders(map[string]string{
-// "Content-Type": "application/json",
-// "Accept": "application/json",
-// })
-func (c *Client) SetHeaders(headers map[string]string) *Client {
- for h, v := range headers {
- c.Header.Set(h, v)
- }
- return c
-}
-
-// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
-//
-// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
-// client.R().
-// SetHeaderVerbatim("all_lowercase", "available").
-// SetHeaderVerbatim("UPPERCASE", "available")
-//
-// Also you can override header value, which was set at client instance level.
-//
-// Since v2.6.0
-func (c *Client) SetHeaderVerbatim(header, value string) *Client {
- c.Header[header] = []string{value}
- return c
-}
-
-// SetCookieJar method sets custom http.CookieJar in the resty client. Its way to override default.
-//
-// For Example: sometimes we don't want to save cookies in api contacting, we can remove the default
-// CookieJar in resty client.
-//
-// client.SetCookieJar(nil)
-func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
- c.httpClient.Jar = jar
- return c
-}
-
-// SetCookie method appends a single cookie in the client instance.
-// These cookies will be added to all the request raised from this client instance.
-// client.SetCookie(&http.Cookie{
-// Name:"go-resty",
-// Value:"This is cookie value",
-// })
-func (c *Client) SetCookie(hc *http.Cookie) *Client {
- c.Cookies = append(c.Cookies, hc)
- return c
-}
-
-// SetCookies method sets an array of cookies in the client instance.
-// These cookies will be added to all the request raised from this client instance.
-// cookies := []*http.Cookie{
-// &http.Cookie{
-// Name:"go-resty-1",
-// Value:"This is cookie 1 value",
-// },
-// &http.Cookie{
-// Name:"go-resty-2",
-// Value:"This is cookie 2 value",
-// },
-// }
-//
-// // Setting a cookies into resty
-// client.SetCookies(cookies)
-func (c *Client) SetCookies(cs []*http.Cookie) *Client {
- c.Cookies = append(c.Cookies, cs...)
- return c
-}
-
-// SetQueryParam method sets single parameter and its value in the client instance.
-// It will be formed as query string for the request.
-//
-// For Example: `search=kitchen%20papers&size=large`
-// in the URL after `?` mark. These query params will be added to all the request raised from
-// this client instance. Also it can be overridden at request level Query Param options.
-//
-// See `Request.SetQueryParam` or `Request.SetQueryParams`.
-// client.
-// SetQueryParam("search", "kitchen papers").
-// SetQueryParam("size", "large")
-func (c *Client) SetQueryParam(param, value string) *Client {
- c.QueryParam.Set(param, value)
- return c
-}
-
-// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
-// It will be formed as query string for the request.
-//
-// For Example: `search=kitchen%20papers&size=large`
-// in the URL after `?` mark. These query params will be added to all the request raised from this
-// client instance. Also it can be overridden at request level Query Param options.
-//
-// See `Request.SetQueryParams` or `Request.SetQueryParam`.
-// client.SetQueryParams(map[string]string{
-// "search": "kitchen papers",
-// "size": "large",
-// })
-func (c *Client) SetQueryParams(params map[string]string) *Client {
- for p, v := range params {
- c.SetQueryParam(p, v)
- }
- return c
-}
-
-// SetFormData method sets Form parameters and their values in the client instance.
-// It's applicable only HTTP method `POST` and `PUT` and requets content type would be set as
-// `application/x-www-form-urlencoded`. These form data will be added to all the request raised from
-// this client instance. Also it can be overridden at request level form data.
-//
-// See `Request.SetFormData`.
-// client.SetFormData(map[string]string{
-// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
-// "user_id": "3455454545",
-// })
-func (c *Client) SetFormData(data map[string]string) *Client {
- for k, v := range data {
- c.FormData.Set(k, v)
- }
- return c
-}
-
-// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
-// Authorization: Basic
-//
-// For Example: To set the header for username "go-resty" and password "welcome"
-// client.SetBasicAuth("go-resty", "welcome")
-//
-// This basic auth information gets added to all the request rasied from this client instance.
-// Also it can be overridden or set one at the request level is supported.
-//
-// See `Request.SetBasicAuth`.
-func (c *Client) SetBasicAuth(username, password string) *Client {
- c.UserInfo = &User{Username: username, Password: password}
- return c
-}
-
-// SetAuthToken method sets the auth token of the `Authorization` header for all HTTP requests.
-// The default auth scheme is `Bearer`, it can be customized with the method `SetAuthScheme`. For Example:
-// Authorization:
-//
-// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
-//
-// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
-//
-// This auth token gets added to all the requests rasied from this client instance.
-// Also it can be overridden or set one at the request level is supported.
-//
-// See `Request.SetAuthToken`.
-func (c *Client) SetAuthToken(token string) *Client {
- c.Token = token
- return c
-}
-
-// SetAuthScheme method sets the auth scheme type in the HTTP request. For Example:
-// Authorization:
-//
-// For Example: To set the scheme to use OAuth
-//
-// client.SetAuthScheme("OAuth")
-//
-// This auth scheme gets added to all the requests rasied from this client instance.
-// Also it can be overridden or set one at the request level is supported.
-//
-// Information about auth schemes can be found in RFC7235 which is linked to below
-// along with the page containing the currently defined official authentication schemes:
-// https://tools.ietf.org/html/rfc7235
-// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
-//
-// See `Request.SetAuthToken`.
-func (c *Client) SetAuthScheme(scheme string) *Client {
- c.AuthScheme = scheme
- return c
-}
-
-// R method creates a new request instance, its used for Get, Post, Put, Delete, Patch, Head, Options, etc.
-func (c *Client) R() *Request {
- r := &Request{
- QueryParam: url.Values{},
- FormData: url.Values{},
- Header: http.Header{},
- Cookies: make([]*http.Cookie, 0),
-
- client: c,
- multipartFiles: []*File{},
- multipartFields: []*MultipartField{},
- PathParams: map[string]string{},
- jsonEscapeHTML: true,
- }
- return r
-}
-
-// NewRequest is an alias for method `R()`. Creates a new request instance, its used for
-// Get, Post, Put, Delete, Patch, Head, Options, etc.
-func (c *Client) NewRequest() *Request {
- return c.R()
-}
-
-// OnBeforeRequest method appends request middleware into the before request chain.
-// Its gets applied after default Resty request middlewares and before request
-// been sent from Resty to host server.
-// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
-// // Now you have access to Client and Request instance
-// // manipulate it as per your need
-//
-// return nil // if its success otherwise return error
-// })
-func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client {
- c.udBeforeRequest = append(c.udBeforeRequest, m)
- return c
-}
-
-// OnAfterResponse method appends response middleware into the after response chain.
-// Once we receive response from host server, default Resty response middleware
-// gets applied and then user assigened response middlewares applied.
-// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
-// // Now you have access to Client and Response instance
-// // manipulate it as per your need
-//
-// return nil // if its success otherwise return error
-// })
-func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client {
- c.afterResponse = append(c.afterResponse, m)
- return c
-}
-
-// OnError method adds a callback that will be run whenever a request execution fails.
-// This is called after all retries have been attempted (if any).
-// If there was a response from the server, the error will be wrapped in *ResponseError
-// which has the last response received from the server.
-//
-// client.OnError(func(req *resty.Request, err error) {
-// if v, ok := err.(*resty.ResponseError); ok {
-// // Do something with v.Response
-// }
-// // Log the error, increment a metric, etc...
-// })
-func (c *Client) OnError(h ErrorHook) *Client {
- c.errorHooks = append(c.errorHooks, h)
- return c
-}
-
-// SetPreRequestHook method sets the given pre-request function into resty client.
-// It is called right before the request is fired.
-//
-// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for mutilple.
-func (c *Client) SetPreRequestHook(h PreRequestHook) *Client {
- if c.preReqHook != nil {
- c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
- }
- c.preReqHook = h
- return c
-}
-
-// SetDebug method enables the debug mode on Resty client. Client logs details of every request and response.
-// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
-// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
-// client.SetDebug(true)
-func (c *Client) SetDebug(d bool) *Client {
- c.Debug = d
- return c
-}
-
-// SetDebugBodyLimit sets the maximum size for which the response and request body will be logged in debug mode.
-// client.SetDebugBodyLimit(1000000)
-func (c *Client) SetDebugBodyLimit(sl int64) *Client {
- c.debugBodySizeLimit = sl
- return c
-}
-
-// OnRequestLog method used to set request log callback into Resty. Registered callback gets
-// called before the resty actually logs the information.
-func (c *Client) OnRequestLog(rl RequestLogCallback) *Client {
- if c.requestLog != nil {
- c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
- functionName(c.requestLog), functionName(rl))
- }
- c.requestLog = rl
- return c
-}
-
-// OnResponseLog method used to set response log callback into Resty. Registered callback gets
-// called before the resty actually logs the information.
-func (c *Client) OnResponseLog(rl ResponseLogCallback) *Client {
- if c.responseLog != nil {
- c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
- functionName(c.responseLog), functionName(rl))
- }
- c.responseLog = rl
- return c
-}
-
-// SetDisableWarn method disables the warning message on Resty client.
-//
-// For Example: Resty warns the user when BasicAuth used on non-TLS mode.
-// client.SetDisableWarn(true)
-func (c *Client) SetDisableWarn(d bool) *Client {
- c.DisableWarn = d
- return c
-}
-
-// SetAllowGetMethodPayload method allows the GET method with payload on Resty client.
-//
-// For Example: Resty allows the user sends request with a payload on HTTP GET method.
-// client.SetAllowGetMethodPayload(true)
-func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
- c.AllowGetMethodPayload = a
- return c
-}
-
-// SetLogger method sets given writer for logging Resty request and response details.
-//
-// Compliant to interface `resty.Logger`.
-func (c *Client) SetLogger(l Logger) *Client {
- c.log = l
- return c
-}
-
-// SetContentLength method enables the HTTP header `Content-Length` value for every request.
-// By default Resty won't set `Content-Length`.
-// client.SetContentLength(true)
-//
-// Also you have an option to enable for particular request. See `Request.SetContentLength`
-func (c *Client) SetContentLength(l bool) *Client {
- c.setContentLength = l
- return c
-}
-
-// SetTimeout method sets timeout for request raised from client.
-// client.SetTimeout(time.Duration(1 * time.Minute))
-func (c *Client) SetTimeout(timeout time.Duration) *Client {
- c.httpClient.Timeout = timeout
- return c
-}
-
-// SetError method is to register the global or client common `Error` object into Resty.
-// It is used for automatic unmarshalling if response status code is greater than 399 and
-// content type either JSON or XML. Can be pointer or non-pointer.
-// client.SetError(&Error{})
-// // OR
-// client.SetError(Error{})
-func (c *Client) SetError(err interface{}) *Client {
- c.Error = typeOf(err)
- return c
-}
-
-// SetRedirectPolicy method sets the client redirect poilicy. Resty provides ready to use
-// redirect policies. Wanna create one for yourself refer to `redirect.go`.
-//
-// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
-//
-// // Need multiple redirect policies together
-// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
-func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
- for _, p := range policies {
- if _, ok := p.(RedirectPolicy); !ok {
- c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
- functionName(p))
- }
- }
-
- c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- for _, p := range policies {
- if err := p.(RedirectPolicy).Apply(req, via); err != nil {
- return err
- }
- }
- return nil // looks good, go ahead
- }
-
- return c
-}
-
-// SetRetryCount method enables retry on Resty client and allows you
-// to set no. of retry count. Resty uses a Backoff mechanism.
-func (c *Client) SetRetryCount(count int) *Client {
- c.RetryCount = count
- return c
-}
-
-// SetRetryWaitTime method sets default wait time to sleep before retrying
-// request.
-//
-// Default is 100 milliseconds.
-func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
- c.RetryWaitTime = waitTime
- return c
-}
-
-// SetRetryMaxWaitTime method sets max wait time to sleep before retrying
-// request.
-//
-// Default is 2 seconds.
-func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
- c.RetryMaxWaitTime = maxWaitTime
- return c
-}
-
-// SetRetryAfter sets callback to calculate wait time between retries.
-// Default (nil) implies exponential backoff with jitter
-func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
- c.RetryAfter = callback
- return c
-}
-
-// AddRetryCondition method adds a retry condition function to array of functions
-// that are checked to determine if the request is retried. The request will
-// retry if any of the functions return true and error is nil.
-//
-// Note: These retry conditions are applied on all Request made using this Client.
-// For Request specific retry conditions check *Request.AddRetryCondition
-func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
- c.RetryConditions = append(c.RetryConditions, condition)
- return c
-}
-
-// AddRetryAfterErrorCondition adds the basic condition of retrying after encountering
-// an error from the http response
-//
-// Since v2.6.0
-func (c *Client) AddRetryAfterErrorCondition() *Client {
- c.AddRetryCondition(func(response *Response, err error) bool {
- return response.IsError()
- })
- return c
-}
-
-// AddRetryHook adds a side-effecting retry hook to an array of hooks
-// that will be executed on each retry.
-//
-// Since v2.6.0
-func (c *Client) AddRetryHook(hook OnRetryFunc) *Client {
- c.RetryHooks = append(c.RetryHooks, hook)
- return c
-}
-
-// SetTLSClientConfig method sets TLSClientConfig for underling client Transport.
-//
-// For Example:
-// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
-// client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
-//
-// // or One can disable security check (https)
-// client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
-//
-// Note: This method overwrites existing `TLSClientConfig`.
-func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
- transport, err := c.transport()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
- transport.TLSClientConfig = config
- return c
-}
-
-// SetProxy method sets the Proxy URL and Port for Resty client.
-// client.SetProxy("http://proxyserver:8888")
-//
-// OR Without this `SetProxy` method, you could also set Proxy via environment variable.
-//
-// Refer to godoc `http.ProxyFromEnvironment`.
-func (c *Client) SetProxy(proxyURL string) *Client {
- transport, err := c.transport()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
-
- pURL, err := url.Parse(proxyURL)
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
-
- c.proxyURL = pURL
- transport.Proxy = http.ProxyURL(c.proxyURL)
- return c
-}
-
-// RemoveProxy method removes the proxy configuration from Resty client
-// client.RemoveProxy()
-func (c *Client) RemoveProxy() *Client {
- transport, err := c.transport()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
- c.proxyURL = nil
- transport.Proxy = nil
- return c
-}
-
-// SetCertificates method helps to set client certificates into Resty conveniently.
-func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
- config, err := c.tlsConfig()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
- config.Certificates = append(config.Certificates, certs...)
- return c
-}
-
-// SetRootCertificate method helps to add one or more root certificates into Resty client
-// client.SetRootCertificate("/path/to/root/pemFile.pem")
-func (c *Client) SetRootCertificate(pemFilePath string) *Client {
- rootPemData, err := ioutil.ReadFile(pemFilePath)
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
-
- config, err := c.tlsConfig()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
- if config.RootCAs == nil {
- config.RootCAs = x509.NewCertPool()
- }
-
- config.RootCAs.AppendCertsFromPEM(rootPemData)
- return c
-}
-
-// SetRootCertificateFromString method helps to add one or more root certificates into Resty client
-// client.SetRootCertificateFromString("pem file content")
-func (c *Client) SetRootCertificateFromString(pemContent string) *Client {
- config, err := c.tlsConfig()
- if err != nil {
- c.log.Errorf("%v", err)
- return c
- }
- if config.RootCAs == nil {
- config.RootCAs = x509.NewCertPool()
- }
-
- config.RootCAs.AppendCertsFromPEM([]byte(pemContent))
- return c
-}
-
-// SetOutputDirectory method sets output directory for saving HTTP response into file.
-// If the output directory not exists then resty creates one. This setting is optional one,
-// if you're planning using absolute path in `Request.SetOutput` and can used together.
-// client.SetOutputDirectory("/save/http/response/here")
-func (c *Client) SetOutputDirectory(dirPath string) *Client {
- c.outputDirectory = dirPath
- return c
-}
-
-// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper`
-// compatible interface implementation in the resty client.
-//
-// Note:
-//
-// - If transport is not type of `*http.Transport` then you may not be able to
-// take advantage of some of the Resty client settings.
-//
-// - It overwrites the Resty client transport instance and it's configurations.
-//
-// transport := &http.Transport{
-// // somthing like Proxying to httptest.Server, etc...
-// Proxy: func(req *http.Request) (*url.URL, error) {
-// return url.Parse(server.URL)
-// },
-// }
-//
-// client.SetTransport(transport)
-func (c *Client) SetTransport(transport http.RoundTripper) *Client {
- if transport != nil {
- c.httpClient.Transport = transport
- }
- return c
-}
-
-// SetScheme method sets custom scheme in the Resty client. It's way to override default.
-// client.SetScheme("http")
-func (c *Client) SetScheme(scheme string) *Client {
- if !IsStringEmpty(scheme) {
- c.scheme = strings.TrimSpace(scheme)
- }
- return c
-}
-
-// SetCloseConnection method sets variable `Close` in http request struct with the given
-// value. More info: https://golang.org/src/net/http/request.go
-func (c *Client) SetCloseConnection(close bool) *Client {
- c.closeConnection = close
- return c
-}
-
-// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
-// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
-// otherwise you might get into connection leaks, no connection reuse.
-//
-// Note: Response middlewares are not applicable, if you use this option. Basically you have
-// taken over the control of response parsing from `Resty`.
-func (c *Client) SetDoNotParseResponse(parse bool) *Client {
- c.notParseResponse = parse
- return c
-}
-
-// SetPathParam method sets single URL path key-value pair in the
-// Resty client instance.
-// client.SetPathParam("userId", "sample@sample.com")
-//
-// Result:
-// URL - /v1/users/{userId}/details
-// Composed URL - /v1/users/sample@sample.com/details
-// It replaces the value of the key while composing the request URL.
-//
-// Also it can be overridden at request level Path Params options,
-// see `Request.SetPathParam` or `Request.SetPathParams`.
-func (c *Client) SetPathParam(param, value string) *Client {
- c.PathParams[param] = value
- return c
-}
-
-// SetPathParams method sets multiple URL path key-value pairs at one go in the
-// Resty client instance.
-// client.SetPathParams(map[string]string{
-// "userId": "sample@sample.com",
-// "subAccountId": "100002",
-// })
-//
-// Result:
-// URL - /v1/users/{userId}/{subAccountId}/details
-// Composed URL - /v1/users/sample@sample.com/100002/details
-// It replaces the value of the key while composing the request URL.
-//
-// Also it can be overridden at request level Path Params options,
-// see `Request.SetPathParam` or `Request.SetPathParams`.
-func (c *Client) SetPathParams(params map[string]string) *Client {
- for p, v := range params {
- c.SetPathParam(p, v)
- }
- return c
-}
-
-// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
-//
-// Note: This option only applicable to standard JSON Marshaller.
-func (c *Client) SetJSONEscapeHTML(b bool) *Client {
- c.jsonEscapeHTML = b
- return c
-}
-
-// EnableTrace method enables the Resty client trace for the requests fired from
-// the client using `httptrace.ClientTrace` and provides insights.
-//
-// client := resty.New().EnableTrace()
-//
-// resp, err := client.R().Get("https://httpbin.org/get")
-// fmt.Println("Error:", err)
-// fmt.Println("Trace Info:", resp.Request.TraceInfo())
-//
-// Also `Request.EnableTrace` available too to get trace info for single request.
-//
-// Since v2.0.0
-func (c *Client) EnableTrace() *Client {
- c.trace = true
- return c
-}
-
-// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
-//
-// Since v2.0.0
-func (c *Client) DisableTrace() *Client {
- c.trace = false
- return c
-}
-
-// IsProxySet method returns the true is proxy is set from resty client otherwise
-// false. By default proxy is set from environment, refer to `http.ProxyFromEnvironment`.
-func (c *Client) IsProxySet() bool {
- return c.proxyURL != nil
-}
-
-// GetClient method returns the current `http.Client` used by the resty client.
-func (c *Client) GetClient() *http.Client {
- return c.httpClient
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Client Unexported methods
-//_______________________________________________________________________
-
-// Executes method executes the given `Request` object and returns response
-// error.
-func (c *Client) execute(req *Request) (*Response, error) {
- // Apply Request middleware
- var err error
-
- // user defined on before request methods
- // to modify the *resty.Request object
- for _, f := range c.udBeforeRequest {
- if err = f(c, req); err != nil {
- return nil, wrapNoRetryErr(err)
- }
- }
-
- // resty middlewares
- for _, f := range c.beforeRequest {
- if err = f(c, req); err != nil {
- return nil, wrapNoRetryErr(err)
- }
- }
-
- if hostHeader := req.Header.Get("Host"); hostHeader != "" {
- req.RawRequest.Host = hostHeader
- }
-
- // call pre-request if defined
- if c.preReqHook != nil {
- if err = c.preReqHook(c, req.RawRequest); err != nil {
- return nil, wrapNoRetryErr(err)
- }
- }
-
- if err = requestLogger(c, req); err != nil {
- return nil, wrapNoRetryErr(err)
- }
-
- req.RawRequest.Body = newRequestBodyReleaser(req.RawRequest.Body, req.bodyBuf)
-
- req.Time = time.Now()
- resp, err := c.httpClient.Do(req.RawRequest)
-
- response := &Response{
- Request: req,
- RawResponse: resp,
- }
-
- if err != nil || req.notParseResponse || c.notParseResponse {
- response.setReceivedAt()
- return response, err
- }
-
- if !req.isSaveResponse {
- defer closeq(resp.Body)
- body := resp.Body
-
- // GitHub #142 & #187
- if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
- if _, ok := body.(*gzip.Reader); !ok {
- body, err = gzip.NewReader(body)
- if err != nil {
- response.setReceivedAt()
- return response, err
- }
- defer closeq(body)
- }
- }
-
- if response.body, err = ioutil.ReadAll(body); err != nil {
- response.setReceivedAt()
- return response, err
- }
-
- response.size = int64(len(response.body))
- }
-
- response.setReceivedAt() // after we read the body
-
- // Apply Response middleware
- for _, f := range c.afterResponse {
- if err = f(c, response); err != nil {
- break
- }
- }
-
- return response, wrapNoRetryErr(err)
-}
-
-// getting TLS client config if not exists then create one
-func (c *Client) tlsConfig() (*tls.Config, error) {
- transport, err := c.transport()
- if err != nil {
- return nil, err
- }
- if transport.TLSClientConfig == nil {
- transport.TLSClientConfig = &tls.Config{}
- }
- return transport.TLSClientConfig, nil
-}
-
-// Transport method returns `*http.Transport` currently in use or error
-// in case currently used `transport` is not a `*http.Transport`.
-func (c *Client) transport() (*http.Transport, error) {
- if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
- return transport, nil
- }
- return nil, errors.New("current transport is not an *http.Transport instance")
-}
-
-// just an internal helper method
-func (c *Client) outputLogTo(w io.Writer) *Client {
- c.log.(*logger).l.SetOutput(w)
- return c
-}
-
-// ResponseError is a wrapper for including the server response with an error.
-// Neither the err nor the response should be nil.
-type ResponseError struct {
- Response *Response
- Err error
-}
-
-func (e *ResponseError) Error() string {
- return e.Err.Error()
-}
-
-func (e *ResponseError) Unwrap() error {
- return e.Err
-}
-
-// Helper to run onErrorHooks hooks.
-// It wraps the error in a ResponseError if the resp is not nil
-// so hooks can access it.
-func (c *Client) onErrorHooks(req *Request, resp *Response, err error) {
- if err != nil {
- if resp != nil { // wrap with ResponseError
- err = &ResponseError{Response: resp, Err: err}
- }
- for _, h := range c.errorHooks {
- h(req, err)
- }
- }
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// File struct and its methods
-//_______________________________________________________________________
-
-// File struct represent file information for multipart request
-type File struct {
- Name string
- ParamName string
- io.Reader
-}
-
-// String returns string value of current file details
-func (f *File) String() string {
- return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// MultipartField struct
-//_______________________________________________________________________
-
-// MultipartField struct represent custom data part for multipart request
-type MultipartField struct {
- Param string
- FileName string
- ContentType string
- io.Reader
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Unexported package methods
-//_______________________________________________________________________
-
-func createClient(hc *http.Client) *Client {
- if hc.Transport == nil {
- hc.Transport = createTransport(nil)
- }
-
- c := &Client{ // not setting lang default values
- QueryParam: url.Values{},
- FormData: url.Values{},
- Header: http.Header{},
- Cookies: make([]*http.Cookie, 0),
- RetryWaitTime: defaultWaitTime,
- RetryMaxWaitTime: defaultMaxWaitTime,
- PathParams: make(map[string]string),
- JSONMarshal: json.Marshal,
- JSONUnmarshal: json.Unmarshal,
- XMLMarshal: xml.Marshal,
- XMLUnmarshal: xml.Unmarshal,
- HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
-
- jsonEscapeHTML: true,
- httpClient: hc,
- debugBodySizeLimit: math.MaxInt32,
- }
-
- // Logger
- c.SetLogger(createLogger())
-
- // default before request middlewares
- c.beforeRequest = []RequestMiddleware{
- parseRequestURL,
- parseRequestHeader,
- parseRequestBody,
- createHTTPRequest,
- addCredentials,
- }
-
- // user defined request middlewares
- c.udBeforeRequest = []RequestMiddleware{}
-
- // default after response middlewares
- c.afterResponse = []ResponseMiddleware{
- responseLogger,
- parseResponseBody,
- saveResponseIntoFile,
- }
-
- return c
-}
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
deleted file mode 100644
index 0e8ac2b6..00000000
--- a/vendor/github.com/go-resty/resty/v2/middleware.go
+++ /dev/null
@@ -1,543 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "time"
-)
-
-const debugRequestLogKey = "__restyDebugRequestLog"
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Request Middleware(s)
-//_______________________________________________________________________
-
-func parseRequestURL(c *Client, r *Request) error {
- // GitHub #103 Path Params
- if len(r.PathParams) > 0 {
- for p, v := range r.PathParams {
- r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
- }
- }
- if len(c.PathParams) > 0 {
- for p, v := range c.PathParams {
- r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
- }
- }
-
- // Parsing request URL
- reqURL, err := url.Parse(r.URL)
- if err != nil {
- return err
- }
-
- // If Request.URL is relative path then added c.HostURL into
- // the request URL otherwise Request.URL will be used as-is
- if !reqURL.IsAbs() {
- r.URL = reqURL.String()
- if len(r.URL) > 0 && r.URL[0] != '/' {
- r.URL = "/" + r.URL
- }
-
- reqURL, err = url.Parse(c.HostURL + r.URL)
- if err != nil {
- return err
- }
- }
-
- // GH #407 && #318
- if reqURL.Scheme == "" && len(c.scheme) > 0 {
- reqURL.Scheme = c.scheme
- }
-
- // Adding Query Param
- query := make(url.Values)
- for k, v := range c.QueryParam {
- for _, iv := range v {
- query.Add(k, iv)
- }
- }
-
- for k, v := range r.QueryParam {
- // remove query param from client level by key
- // since overrides happens for that key in the request
- query.Del(k)
-
- for _, iv := range v {
- query.Add(k, iv)
- }
- }
-
- // GitHub #123 Preserve query string order partially.
- // Since not feasible in `SetQuery*` resty methods, because
- // standard package `url.Encode(...)` sorts the query params
- // alphabetically
- if len(query) > 0 {
- if IsStringEmpty(reqURL.RawQuery) {
- reqURL.RawQuery = query.Encode()
- } else {
- reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
- }
- }
-
- r.URL = reqURL.String()
-
- return nil
-}
-
-func parseRequestHeader(c *Client, r *Request) error {
- hdr := make(http.Header)
- for k := range c.Header {
- hdr[k] = append(hdr[k], c.Header[k]...)
- }
-
- for k := range r.Header {
- hdr.Del(k)
- hdr[k] = append(hdr[k], r.Header[k]...)
- }
-
- if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
- hdr.Set(hdrUserAgentKey, hdrUserAgentValue)
- }
-
- ct := hdr.Get(hdrContentTypeKey)
- if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) &&
- (IsJSONType(ct) || IsXMLType(ct)) {
- hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
- }
-
- r.Header = hdr
-
- return nil
-}
-
-func parseRequestBody(c *Client, r *Request) (err error) {
- if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
- // Handling Multipart
- if r.isMultiPart && !(r.Method == MethodPatch) {
- if err = handleMultipart(c, r); err != nil {
- return
- }
-
- goto CL
- }
-
- // Handling Form Data
- if len(c.FormData) > 0 || len(r.FormData) > 0 {
- handleFormData(c, r)
-
- goto CL
- }
-
- // Handling Request body
- if r.Body != nil {
- handleContentType(c, r)
-
- if err = handleRequestBody(c, r); err != nil {
- return
- }
- }
- }
-
-CL:
- // by default resty won't set content length, you can if you want to :)
- if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
- r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
- }
-
- return
-}
-
-func createHTTPRequest(c *Client, r *Request) (err error) {
- if r.bodyBuf == nil {
- if reader, ok := r.Body.(io.Reader); ok {
- r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
- } else if c.setContentLength || r.setContentLength {
- r.RawRequest, err = http.NewRequest(r.Method, r.URL, http.NoBody)
- } else {
- r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
- }
- } else {
- r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf)
- }
-
- if err != nil {
- return
- }
-
- // Assign close connection option
- r.RawRequest.Close = c.closeConnection
-
- // Add headers into http request
- r.RawRequest.Header = r.Header
-
- // Add cookies from client instance into http request
- for _, cookie := range c.Cookies {
- r.RawRequest.AddCookie(cookie)
- }
-
- // Add cookies from request instance into http request
- for _, cookie := range r.Cookies {
- r.RawRequest.AddCookie(cookie)
- }
-
- // Enable trace
- if c.trace || r.trace {
- r.clientTrace = &clientTrace{}
- r.ctx = r.clientTrace.createContext(r.Context())
- }
-
- // Use context if it was specified
- if r.ctx != nil {
- r.RawRequest = r.RawRequest.WithContext(r.ctx)
- }
-
- bodyCopy, err := getBodyCopy(r)
- if err != nil {
- return err
- }
-
- // assign get body func for the underlying raw request instance
- r.RawRequest.GetBody = func() (io.ReadCloser, error) {
- if bodyCopy != nil {
- return ioutil.NopCloser(bytes.NewReader(bodyCopy.Bytes())), nil
- }
- return nil, nil
- }
-
- return
-}
-
-func addCredentials(c *Client, r *Request) error {
- var isBasicAuth bool
- // Basic Auth
- if r.UserInfo != nil { // takes precedence
- r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
- isBasicAuth = true
- } else if c.UserInfo != nil {
- r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
- isBasicAuth = true
- }
-
- if !c.DisableWarn {
- if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
- c.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
- }
- }
-
- // Set the Authorization Header Scheme
- var authScheme string
- if !IsStringEmpty(r.AuthScheme) {
- authScheme = r.AuthScheme
- } else if !IsStringEmpty(c.AuthScheme) {
- authScheme = c.AuthScheme
- } else {
- authScheme = "Bearer"
- }
-
- // Build the Token Auth header
- if !IsStringEmpty(r.Token) { // takes precedence
- r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+r.Token)
- } else if !IsStringEmpty(c.Token) {
- r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+c.Token)
- }
-
- return nil
-}
-
-func requestLogger(c *Client, r *Request) error {
- if c.Debug {
- rr := r.RawRequest
- rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString(c.debugBodySizeLimit)}
- if c.requestLog != nil {
- if err := c.requestLog(rl); err != nil {
- return err
- }
- }
- // fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
-
- reqLog := "\n==============================================================================\n" +
- "~~~ REQUEST ~~~\n" +
- fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
- fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
- fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
- fmt.Sprintf("BODY :\n%v\n", rl.Body) +
- "------------------------------------------------------------------------------\n"
-
- r.initValuesMap()
- r.values[debugRequestLogKey] = reqLog
- }
-
- return nil
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Response Middleware(s)
-//_______________________________________________________________________
-
-func responseLogger(c *Client, res *Response) error {
- if c.Debug {
- rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
- if c.responseLog != nil {
- if err := c.responseLog(rl); err != nil {
- return err
- }
- }
-
- debugLog := res.Request.values[debugRequestLogKey].(string)
- debugLog += "~~~ RESPONSE ~~~\n" +
- fmt.Sprintf("STATUS : %s\n", res.Status()) +
- fmt.Sprintf("PROTO : %s\n", res.RawResponse.Proto) +
- fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
- fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
- "HEADERS :\n" +
- composeHeaders(c, res.Request, rl.Header) + "\n"
- if res.Request.isSaveResponse {
- debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
- } else {
- debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
- }
- debugLog += "==============================================================================\n"
-
- c.log.Debugf("%s", debugLog)
- }
-
- return nil
-}
-
-func parseResponseBody(c *Client, res *Response) (err error) {
- if res.StatusCode() == http.StatusNoContent {
- return
- }
- // Handles only JSON or XML content type
- ct := firstNonEmpty(res.Request.forceContentType, res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
- if IsJSONType(ct) || IsXMLType(ct) {
- // HTTP status code > 199 and < 300, considered as Result
- if res.IsSuccess() {
- res.Request.Error = nil
- if res.Request.Result != nil {
- err = Unmarshalc(c, ct, res.body, res.Request.Result)
- return
- }
- }
-
- // HTTP status code > 399, considered as Error
- if res.IsError() {
- // global error interface
- if res.Request.Error == nil && c.Error != nil {
- res.Request.Error = reflect.New(c.Error).Interface()
- }
-
- if res.Request.Error != nil {
- err = Unmarshalc(c, ct, res.body, res.Request.Error)
- }
- }
- }
-
- return
-}
-
-func handleMultipart(c *Client, r *Request) (err error) {
- r.bodyBuf = acquireBuffer()
- w := multipart.NewWriter(r.bodyBuf)
-
- for k, v := range c.FormData {
- for _, iv := range v {
- if err = w.WriteField(k, iv); err != nil {
- return err
- }
- }
- }
-
- for k, v := range r.FormData {
- for _, iv := range v {
- if strings.HasPrefix(k, "@") { // file
- err = addFile(w, k[1:], iv)
- if err != nil {
- return
- }
- } else { // form value
- if err = w.WriteField(k, iv); err != nil {
- return err
- }
- }
- }
- }
-
- // #21 - adding io.Reader support
- if len(r.multipartFiles) > 0 {
- for _, f := range r.multipartFiles {
- err = addFileReader(w, f)
- if err != nil {
- return
- }
- }
- }
-
- // GitHub #130 adding multipart field support with content type
- if len(r.multipartFields) > 0 {
- for _, mf := range r.multipartFields {
- if err = addMultipartFormField(w, mf); err != nil {
- return
- }
- }
- }
-
- r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
- err = w.Close()
-
- return
-}
-
-func handleFormData(c *Client, r *Request) {
- formData := url.Values{}
-
- for k, v := range c.FormData {
- for _, iv := range v {
- formData.Add(k, iv)
- }
- }
-
- for k, v := range r.FormData {
- // remove form data field from client level by key
- // since overrides happens for that key in the request
- formData.Del(k)
-
- for _, iv := range v {
- formData.Add(k, iv)
- }
- }
-
- r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
- r.Header.Set(hdrContentTypeKey, formContentType)
- r.isFormData = true
-}
-
-func handleContentType(c *Client, r *Request) {
- contentType := r.Header.Get(hdrContentTypeKey)
- if IsStringEmpty(contentType) {
- contentType = DetectContentType(r.Body)
- r.Header.Set(hdrContentTypeKey, contentType)
- }
-}
-
-func handleRequestBody(c *Client, r *Request) (err error) {
- var bodyBytes []byte
- contentType := r.Header.Get(hdrContentTypeKey)
- kind := kindOf(r.Body)
- r.bodyBuf = nil
-
- if reader, ok := r.Body.(io.Reader); ok {
- if c.setContentLength || r.setContentLength { // keep backward compatibility
- r.bodyBuf = acquireBuffer()
- _, err = r.bodyBuf.ReadFrom(reader)
- r.Body = nil
- } else {
- // Otherwise buffer less processing for `io.Reader`, sounds good.
- return
- }
- } else if b, ok := r.Body.([]byte); ok {
- bodyBytes = b
- } else if s, ok := r.Body.(string); ok {
- bodyBytes = []byte(s)
- } else if IsJSONType(contentType) &&
- (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
- r.bodyBuf, err = jsonMarshal(c, r, r.Body)
- if err != nil {
- return
- }
- } else if IsXMLType(contentType) && (kind == reflect.Struct) {
- bodyBytes, err = c.XMLMarshal(r.Body)
- if err != nil {
- return
- }
- }
-
- if bodyBytes == nil && r.bodyBuf == nil {
- err = errors.New("unsupported 'Body' type/value")
- }
-
- // if any errors during body bytes handling, return it
- if err != nil {
- return
- }
-
- // []byte into Buffer
- if bodyBytes != nil && r.bodyBuf == nil {
- r.bodyBuf = acquireBuffer()
- _, _ = r.bodyBuf.Write(bodyBytes)
- }
-
- return
-}
-
-func saveResponseIntoFile(c *Client, res *Response) error {
- if res.Request.isSaveResponse {
- file := ""
-
- if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
- file += c.outputDirectory + string(filepath.Separator)
- }
-
- file = filepath.Clean(file + res.Request.outputFile)
- if err := createDirectory(filepath.Dir(file)); err != nil {
- return err
- }
-
- outFile, err := os.Create(file)
- if err != nil {
- return err
- }
- defer closeq(outFile)
-
- // io.Copy reads maximum 32kb size, it is perfect for large file download too
- defer closeq(res.RawResponse.Body)
-
- written, err := io.Copy(outFile, res.RawResponse.Body)
- if err != nil {
- return err
- }
-
- res.size = written
- }
-
- return nil
-}
-
-func getBodyCopy(r *Request) (*bytes.Buffer, error) {
- // If r.bodyBuf present, return the copy
- if r.bodyBuf != nil {
- return bytes.NewBuffer(r.bodyBuf.Bytes()), nil
- }
-
- // Maybe body is `io.Reader`.
- // Note: Resty user have to watchout for large body size of `io.Reader`
- if r.RawRequest.Body != nil {
- b, err := ioutil.ReadAll(r.RawRequest.Body)
- if err != nil {
- return nil, err
- }
-
- // Restore the Body
- closeq(r.RawRequest.Body)
- r.RawRequest.Body = ioutil.NopCloser(bytes.NewBuffer(b))
-
- // Return the Body bytes
- return bytes.NewBuffer(b), nil
- }
- return nil, nil
-}
diff --git a/vendor/github.com/go-resty/resty/v2/redirect.go b/vendor/github.com/go-resty/resty/v2/redirect.go
deleted file mode 100644
index 7d7e43bc..00000000
--- a/vendor/github.com/go-resty/resty/v2/redirect.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "errors"
- "fmt"
- "net"
- "net/http"
- "strings"
-)
-
-type (
- // RedirectPolicy to regulate the redirects in the resty client.
- // Objects implementing the RedirectPolicy interface can be registered as
- //
- // Apply function should return nil to continue the redirect jounery, otherwise
- // return error to stop the redirect.
- RedirectPolicy interface {
- Apply(req *http.Request, via []*http.Request) error
- }
-
- // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
- // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
- RedirectPolicyFunc func(*http.Request, []*http.Request) error
-)
-
-// Apply calls f(req, via).
-func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
- return f(req, via)
-}
-
-// NoRedirectPolicy is used to disable redirects in the HTTP client
-// resty.SetRedirectPolicy(NoRedirectPolicy())
-func NoRedirectPolicy() RedirectPolicy {
- return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
- return errors.New("auto redirect is disabled")
- })
-}
-
-// FlexibleRedirectPolicy is convenient method to create No of redirect policy for HTTP client.
-// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
-func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
- return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
- if len(via) >= noOfRedirect {
- return fmt.Errorf("stopped after %d redirects", noOfRedirect)
- }
- checkHostAndAddHeaders(req, via[0])
- return nil
- })
-}
-
-// DomainCheckRedirectPolicy is convenient method to define domain name redirect rule in resty client.
-// Redirect is allowed for only mentioned host in the policy.
-// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
-func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
- hosts := make(map[string]bool)
- for _, h := range hostnames {
- hosts[strings.ToLower(h)] = true
- }
-
- fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
- if ok := hosts[getHostname(req.URL.Host)]; !ok {
- return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
- }
-
- return nil
- })
-
- return fn
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Package Unexported methods
-//_______________________________________________________________________
-
-func getHostname(host string) (hostname string) {
- if strings.Index(host, ":") > 0 {
- host, _, _ = net.SplitHostPort(host)
- }
- hostname = strings.ToLower(host)
- return
-}
-
-// By default Golang will not redirect request headers
-// after go throughing various discussion comments from thread
-// https://github.com/golang/go/issues/4800
-// Resty will add all the headers during a redirect for the same host
-func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
- curHostname := getHostname(cur.URL.Host)
- preHostname := getHostname(pre.URL.Host)
- if strings.EqualFold(curHostname, preHostname) {
- for key, val := range pre.Header {
- cur.Header[key] = val
- }
- } else { // only library User-Agent header is added
- cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
- }
-}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
deleted file mode 100644
index 672df88c..00000000
--- a/vendor/github.com/go-resty/resty/v2/request.go
+++ /dev/null
@@ -1,896 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/url"
- "reflect"
- "strings"
- "time"
-)
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Request struct and methods
-//_______________________________________________________________________
-
-// Request struct is used to compose and fire individual request from
-// resty client. Request provides an options to override client level
-// settings and also an options for the request composition.
-type Request struct {
- URL string
- Method string
- Token string
- AuthScheme string
- QueryParam url.Values
- FormData url.Values
- PathParams map[string]string
- Header http.Header
- Time time.Time
- Body interface{}
- Result interface{}
- Error interface{}
- RawRequest *http.Request
- SRV *SRVRecord
- UserInfo *User
- Cookies []*http.Cookie
-
- // Attempt is to represent the request attempt made during a Resty
- // request execution flow, including retry count.
- //
- // Since v2.4.0
- Attempt int
-
- isMultiPart bool
- isFormData bool
- setContentLength bool
- isSaveResponse bool
- notParseResponse bool
- jsonEscapeHTML bool
- trace bool
- outputFile string
- fallbackContentType string
- forceContentType string
- ctx context.Context
- values map[string]interface{}
- client *Client
- bodyBuf *bytes.Buffer
- clientTrace *clientTrace
- multipartFiles []*File
- multipartFields []*MultipartField
- retryConditions []RetryConditionFunc
-}
-
-// Context method returns the Context if its already set in request
-// otherwise it creates new one using `context.Background()`.
-func (r *Request) Context() context.Context {
- if r.ctx == nil {
- return context.Background()
- }
- return r.ctx
-}
-
-// SetContext method sets the context.Context for current Request. It allows
-// to interrupt the request execution if ctx.Done() channel is closed.
-// See https://blog.golang.org/context article and the "context" package
-// documentation.
-func (r *Request) SetContext(ctx context.Context) *Request {
- r.ctx = ctx
- return r
-}
-
-// SetHeader method is to set a single header field and its value in the current request.
-//
-// For Example: To set `Content-Type` and `Accept` as `application/json`.
-// client.R().
-// SetHeader("Content-Type", "application/json").
-// SetHeader("Accept", "application/json")
-//
-// Also you can override header value, which was set at client instance level.
-func (r *Request) SetHeader(header, value string) *Request {
- r.Header.Set(header, value)
- return r
-}
-
-// SetHeaders method sets multiple headers field and its values at one go in the current request.
-//
-// For Example: To set `Content-Type` and `Accept` as `application/json`
-//
-// client.R().
-// SetHeaders(map[string]string{
-// "Content-Type": "application/json",
-// "Accept": "application/json",
-// })
-// Also you can override header value, which was set at client instance level.
-func (r *Request) SetHeaders(headers map[string]string) *Request {
- for h, v := range headers {
- r.SetHeader(h, v)
- }
- return r
-}
-
-// SetHeaderMultiValues sets multiple headers fields and its values is list of strings at one go in the current request.
-//
-// For Example: To set `Accept` as `text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8`
-//
-// client.R().
-// SetHeaderMultiValues(map[string][]string{
-// "Accept": []string{"text/html", "application/xhtml+xml", "application/xml;q=0.9", "image/webp", "*/*;q=0.8"},
-// })
-// Also you can override header value, which was set at client instance level.
-func (r *Request) SetHeaderMultiValues(headers map[string][]string) *Request {
- for key, values := range headers {
- r.SetHeader(key, strings.Join(values, ", "))
- }
- return r
-}
-
-// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
-//
-// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
-// client.R().
-// SetHeaderVerbatim("all_lowercase", "available").
-// SetHeaderVerbatim("UPPERCASE", "available")
-//
-// Also you can override header value, which was set at client instance level.
-//
-// Since v2.6.0
-func (r *Request) SetHeaderVerbatim(header, value string) *Request {
- r.Header[header] = []string{value}
- return r
-}
-
-// SetQueryParam method sets single parameter and its value in the current request.
-// It will be formed as query string for the request.
-//
-// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
-// client.R().
-// SetQueryParam("search", "kitchen papers").
-// SetQueryParam("size", "large")
-// Also you can override query params value, which was set at client instance level.
-func (r *Request) SetQueryParam(param, value string) *Request {
- r.QueryParam.Set(param, value)
- return r
-}
-
-// SetQueryParams method sets multiple parameters and its values at one go in the current request.
-// It will be formed as query string for the request.
-//
-// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
-// client.R().
-// SetQueryParams(map[string]string{
-// "search": "kitchen papers",
-// "size": "large",
-// })
-// Also you can override query params value, which was set at client instance level.
-func (r *Request) SetQueryParams(params map[string]string) *Request {
- for p, v := range params {
- r.SetQueryParam(p, v)
- }
- return r
-}
-
-// SetQueryParamsFromValues method appends multiple parameters with multi-value
-// (`url.Values`) at one go in the current request. It will be formed as
-// query string for the request.
-//
-// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
-// client.R().
-// SetQueryParamsFromValues(url.Values{
-// "status": []string{"pending", "approved", "open"},
-// })
-// Also you can override query params value, which was set at client instance level.
-func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
- for p, v := range params {
- for _, pv := range v {
- r.QueryParam.Add(p, pv)
- }
- }
- return r
-}
-
-// SetQueryString method provides ability to use string as an input to set URL query string for the request.
-//
-// Using String as an input
-// client.R().
-// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
-func (r *Request) SetQueryString(query string) *Request {
- params, err := url.ParseQuery(strings.TrimSpace(query))
- if err == nil {
- for p, v := range params {
- for _, pv := range v {
- r.QueryParam.Add(p, pv)
- }
- }
- } else {
- r.client.log.Errorf("%v", err)
- }
- return r
-}
-
-// SetFormData method sets Form parameters and their values in the current request.
-// It's applicable only HTTP method `POST` and `PUT` and requests content type would be set as
-// `application/x-www-form-urlencoded`.
-// client.R().
-// SetFormData(map[string]string{
-// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
-// "user_id": "3455454545",
-// })
-// Also you can override form data value, which was set at client instance level.
-func (r *Request) SetFormData(data map[string]string) *Request {
- for k, v := range data {
- r.FormData.Set(k, v)
- }
- return r
-}
-
-// SetFormDataFromValues method appends multiple form parameters with multi-value
-// (`url.Values`) at one go in the current request.
-// client.R().
-// SetFormDataFromValues(url.Values{
-// "search_criteria": []string{"book", "glass", "pencil"},
-// })
-// Also you can override form data value, which was set at client instance level.
-func (r *Request) SetFormDataFromValues(data url.Values) *Request {
- for k, v := range data {
- for _, kv := range v {
- r.FormData.Add(k, kv)
- }
- }
- return r
-}
-
-// SetBody method sets the request body for the request. It supports various realtime needs as easy.
-// We can say its quite handy or powerful. Supported request body data types is `string`,
-// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. Body value can be pointer or non-pointer.
-// Automatic marshalling for JSON and XML content type, if it is `struct`, `map`, or `slice`.
-//
-// Note: `io.Reader` is processed as bufferless mode while sending request.
-//
-// For Example: Struct as a body input, based on content type, it will be marshalled.
-// client.R().
-// SetBody(User{
-// Username: "jeeva@myjeeva.com",
-// Password: "welcome2resty",
-// })
-//
-// Map as a body input, based on content type, it will be marshalled.
-// client.R().
-// SetBody(map[string]interface{}{
-// "username": "jeeva@myjeeva.com",
-// "password": "welcome2resty",
-// "address": &Address{
-// Address1: "1111 This is my street",
-// Address2: "Apt 201",
-// City: "My City",
-// State: "My State",
-// ZipCode: 00000,
-// },
-// })
-//
-// String as a body input. Suitable for any need as a string input.
-// client.R().
-// SetBody(`{
-// "username": "jeeva@getrightcare.com",
-// "password": "admin"
-// }`)
-//
-// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc.
-// client.R().
-// SetBody([]byte("This is my raw request, sent as-is"))
-func (r *Request) SetBody(body interface{}) *Request {
- r.Body = body
- return r
-}
-
-// SetResult method is to register the response `Result` object for automatic unmarshalling for the request,
-// if response status code is between 200 and 299 and content type either JSON or XML.
-//
-// Note: Result object can be pointer or non-pointer.
-// client.R().SetResult(&AuthToken{})
-// // OR
-// client.R().SetResult(AuthToken{})
-//
-// Accessing a result value from response instance.
-// response.Result().(*AuthToken)
-func (r *Request) SetResult(res interface{}) *Request {
- r.Result = getPointer(res)
- return r
-}
-
-// SetError method is to register the request `Error` object for automatic unmarshalling for the request,
-// if response status code is greater than 399 and content type either JSON or XML.
-//
-// Note: Error object can be pointer or non-pointer.
-// client.R().SetError(&AuthError{})
-// // OR
-// client.R().SetError(AuthError{})
-//
-// Accessing a error value from response instance.
-// response.Error().(*AuthError)
-func (r *Request) SetError(err interface{}) *Request {
- r.Error = getPointer(err)
- return r
-}
-
-// SetFile method is to set single file field name and its path for multipart upload.
-// client.R().
-// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
-func (r *Request) SetFile(param, filePath string) *Request {
- r.isMultiPart = true
- r.FormData.Set("@"+param, filePath)
- return r
-}
-
-// SetFiles method is to set multiple file field name and its path for multipart upload.
-// client.R().
-// SetFiles(map[string]string{
-// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
-// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
-// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
-// })
-func (r *Request) SetFiles(files map[string]string) *Request {
- r.isMultiPart = true
- for f, fp := range files {
- r.FormData.Set("@"+f, fp)
- }
- return r
-}
-
-// SetFileReader method is to set single file using io.Reader for multipart upload.
-// client.R().
-// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
-// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
-func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
- r.isMultiPart = true
- r.multipartFiles = append(r.multipartFiles, &File{
- Name: fileName,
- ParamName: param,
- Reader: reader,
- })
- return r
-}
-
-// SetMultipartFormData method allows simple form data to be attached to the request as `multipart:form-data`
-func (r *Request) SetMultipartFormData(data map[string]string) *Request {
- for k, v := range data {
- r = r.SetMultipartField(k, "", "", strings.NewReader(v))
- }
-
- return r
-}
-
-// SetMultipartField method is to set custom data using io.Reader for multipart upload.
-func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
- r.isMultiPart = true
- r.multipartFields = append(r.multipartFields, &MultipartField{
- Param: param,
- FileName: fileName,
- ContentType: contentType,
- Reader: reader,
- })
- return r
-}
-
-// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
-//
-// For Example:
-// client.R().SetMultipartFields(
-// &resty.MultipartField{
-// Param: "uploadManifest1",
-// FileName: "upload-file-1.json",
-// ContentType: "application/json",
-// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
-// },
-// &resty.MultipartField{
-// Param: "uploadManifest2",
-// FileName: "upload-file-2.json",
-// ContentType: "application/json",
-// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
-// })
-//
-// If you have slice already, then simply call-
-// client.R().SetMultipartFields(fields...)
-func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
- r.isMultiPart = true
- r.multipartFields = append(r.multipartFields, fields...)
- return r
-}
-
-// SetContentLength method sets the HTTP header `Content-Length` value for current request.
-// By default Resty won't set `Content-Length`. Also you have an option to enable for every
-// request.
-//
-// See `Client.SetContentLength`
-// client.R().SetContentLength(true)
-func (r *Request) SetContentLength(l bool) *Request {
- r.setContentLength = l
- return r
-}
-
-// SetBasicAuth method sets the basic authentication header in the current HTTP request.
-//
-// For Example:
-// Authorization: Basic
-//
-// To set the header for username "go-resty" and password "welcome"
-// client.R().SetBasicAuth("go-resty", "welcome")
-//
-// This method overrides the credentials set by method `Client.SetBasicAuth`.
-func (r *Request) SetBasicAuth(username, password string) *Request {
- r.UserInfo = &User{Username: username, Password: password}
- return r
-}
-
-// SetAuthToken method sets the auth token header(Default Scheme: Bearer) in the current HTTP request. Header example:
-// Authorization: Bearer
-//
-// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
-//
-// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
-//
-// This method overrides the Auth token set by method `Client.SetAuthToken`.
-func (r *Request) SetAuthToken(token string) *Request {
- r.Token = token
- return r
-}
-
-// SetAuthScheme method sets the auth token scheme type in the HTTP request. For Example:
-// Authorization:
-//
-// For Example: To set the scheme to use OAuth
-//
-// client.R().SetAuthScheme("OAuth")
-//
-// This auth header scheme gets added to all the request rasied from this client instance.
-// Also it can be overridden or set one at the request level is supported.
-//
-// Information about Auth schemes can be found in RFC7235 which is linked to below along with the page containing
-// the currently defined official authentication schemes:
-// https://tools.ietf.org/html/rfc7235
-// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
-//
-// This method overrides the Authorization scheme set by method `Client.SetAuthScheme`.
-func (r *Request) SetAuthScheme(scheme string) *Request {
- r.AuthScheme = scheme
- return r
-}
-
-// SetOutput method sets the output file for current HTTP request. Current HTTP response will be
-// saved into given file. It is similar to `curl -o` flag. Absolute path or relative path can be used.
-// If is it relative path then output file goes under the output directory, as mentioned
-// in the `Client.SetOutputDirectory`.
-// client.R().
-// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
-// Get("http://bit.ly/1LouEKr")
-//
-// Note: In this scenario `Response.Body` might be nil.
-func (r *Request) SetOutput(file string) *Request {
- r.outputFile = file
- r.isSaveResponse = true
- return r
-}
-
-// SetSRV method sets the details to query the service SRV record and execute the
-// request.
-// client.R().
-// SetSRV(SRVRecord{"web", "testservice.com"}).
-// Get("/get")
-func (r *Request) SetSRV(srv *SRVRecord) *Request {
- r.SRV = srv
- return r
-}
-
-// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
-// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body,
-// otherwise you might get into connection leaks, no connection reuse.
-//
-// Note: Response middlewares are not applicable, if you use this option. Basically you have
-// taken over the control of response parsing from `Resty`.
-func (r *Request) SetDoNotParseResponse(parse bool) *Request {
- r.notParseResponse = parse
- return r
-}
-
-// SetPathParam method sets single URL path key-value pair in the
-// Resty current request instance.
-// client.R().SetPathParam("userId", "sample@sample.com")
-//
-// Result:
-// URL - /v1/users/{userId}/details
-// Composed URL - /v1/users/sample@sample.com/details
-// It replaces the value of the key while composing the request URL. Also you can
-// override Path Params value, which was set at client instance level.
-func (r *Request) SetPathParam(param, value string) *Request {
- r.PathParams[param] = value
- return r
-}
-
-// SetPathParams method sets multiple URL path key-value pairs at one go in the
-// Resty current request instance.
-// client.R().SetPathParams(map[string]string{
-// "userId": "sample@sample.com",
-// "subAccountId": "100002",
-// })
-//
-// Result:
-// URL - /v1/users/{userId}/{subAccountId}/details
-// Composed URL - /v1/users/sample@sample.com/100002/details
-// It replaces the value of the key while composing request URL. Also you can
-// override Path Params value, which was set at client instance level.
-func (r *Request) SetPathParams(params map[string]string) *Request {
- for p, v := range params {
- r.SetPathParam(p, v)
- }
- return r
-}
-
-// ExpectContentType method allows to provide fallback `Content-Type` for automatic unmarshalling
-// when `Content-Type` response header is unavailable.
-func (r *Request) ExpectContentType(contentType string) *Request {
- r.fallbackContentType = contentType
- return r
-}
-
-// ForceContentType method provides a strong sense of response `Content-Type` for automatic unmarshalling.
-// Resty gives this a higher priority than the `Content-Type` response header. This means that if both
-// `Request.ForceContentType` is set and the response `Content-Type` is available, `ForceContentType` will win.
-func (r *Request) ForceContentType(contentType string) *Request {
- r.forceContentType = contentType
- return r
-}
-
-// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
-//
-// Note: This option only applicable to standard JSON Marshaller.
-func (r *Request) SetJSONEscapeHTML(b bool) *Request {
- r.jsonEscapeHTML = b
- return r
-}
-
-// SetCookie method appends a single cookie in the current request instance.
-// client.R().SetCookie(&http.Cookie{
-// Name:"go-resty",
-// Value:"This is cookie value",
-// })
-//
-// Note: Method appends the Cookie value into existing Cookie if already existing.
-//
-// Since v2.1.0
-func (r *Request) SetCookie(hc *http.Cookie) *Request {
- r.Cookies = append(r.Cookies, hc)
- return r
-}
-
-// SetCookies method sets an array of cookies in the current request instance.
-// cookies := []*http.Cookie{
-// &http.Cookie{
-// Name:"go-resty-1",
-// Value:"This is cookie 1 value",
-// },
-// &http.Cookie{
-// Name:"go-resty-2",
-// Value:"This is cookie 2 value",
-// },
-// }
-//
-// // Setting a cookies into resty's current request
-// client.R().SetCookies(cookies)
-//
-// Note: Method appends the Cookie value into existing Cookie if already existing.
-//
-// Since v2.1.0
-func (r *Request) SetCookies(rs []*http.Cookie) *Request {
- r.Cookies = append(r.Cookies, rs...)
- return r
-}
-
-// AddRetryCondition method adds a retry condition function to the request's
-// array of functions that are checked to determine if the request is retried.
-// The request will retry if any of the functions return true and error is nil.
-//
-// Note: These retry conditions are checked before all retry conditions of the client.
-//
-// Since v2.7.0
-func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request {
- r.retryConditions = append(r.retryConditions, condition)
- return r
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// HTTP request tracing
-//_______________________________________________________________________
-
-// EnableTrace method enables trace for the current request
-// using `httptrace.ClientTrace` and provides insights.
-//
-// client := resty.New()
-//
-// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
-// fmt.Println("Error:", err)
-// fmt.Println("Trace Info:", resp.Request.TraceInfo())
-//
-// See `Client.EnableTrace` available too to get trace info for all requests.
-//
-// Since v2.0.0
-func (r *Request) EnableTrace() *Request {
- r.trace = true
- return r
-}
-
-// TraceInfo method returns the trace info for the request.
-// If either the Client or Request EnableTrace function has not been called
-// prior to the request being made, an empty TraceInfo object will be returned.
-//
-// Since v2.0.0
-func (r *Request) TraceInfo() TraceInfo {
- ct := r.clientTrace
-
- if ct == nil {
- return TraceInfo{}
- }
-
- ti := TraceInfo{
- DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
- TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
- ServerTime: ct.gotFirstResponseByte.Sub(ct.gotConn),
- IsConnReused: ct.gotConnInfo.Reused,
- IsConnWasIdle: ct.gotConnInfo.WasIdle,
- ConnIdleTime: ct.gotConnInfo.IdleTime,
- RequestAttempt: r.Attempt,
- }
-
- // Calculate the total time accordingly,
- // when connection is reused
- if ct.gotConnInfo.Reused {
- ti.TotalTime = ct.endTime.Sub(ct.getConn)
- } else {
- ti.TotalTime = ct.endTime.Sub(ct.dnsStart)
- }
-
- // Only calculate on successful connections
- if !ct.connectDone.IsZero() {
- ti.TCPConnTime = ct.connectDone.Sub(ct.dnsDone)
- }
-
- // Only calculate on successful connections
- if !ct.gotConn.IsZero() {
- ti.ConnTime = ct.gotConn.Sub(ct.getConn)
- }
-
- // Only calculate on successful connections
- if !ct.gotFirstResponseByte.IsZero() {
- ti.ResponseTime = ct.endTime.Sub(ct.gotFirstResponseByte)
- }
-
- // Capture remote address info when connection is non-nil
- if ct.gotConnInfo.Conn != nil {
- ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr()
- }
-
- return ti
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// HTTP verb method starts here
-//_______________________________________________________________________
-
-// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
-func (r *Request) Get(url string) (*Response, error) {
- return r.Execute(MethodGet, url)
-}
-
-// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
-func (r *Request) Head(url string) (*Response, error) {
- return r.Execute(MethodHead, url)
-}
-
-// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
-func (r *Request) Post(url string) (*Response, error) {
- return r.Execute(MethodPost, url)
-}
-
-// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
-func (r *Request) Put(url string) (*Response, error) {
- return r.Execute(MethodPut, url)
-}
-
-// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
-func (r *Request) Delete(url string) (*Response, error) {
- return r.Execute(MethodDelete, url)
-}
-
-// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
-func (r *Request) Options(url string) (*Response, error) {
- return r.Execute(MethodOptions, url)
-}
-
-// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
-func (r *Request) Patch(url string) (*Response, error) {
- return r.Execute(MethodPatch, url)
-}
-
-// Send method performs the HTTP request using the method and URL already defined
-// for current `Request`.
-// req := client.R()
-// req.Method = resty.GET
-// req.URL = "http://httpbin.org/get"
-// resp, err := client.R().Send()
-func (r *Request) Send() (*Response, error) {
- return r.Execute(r.Method, r.URL)
-}
-
-// Execute method performs the HTTP request with given HTTP method and URL
-// for current `Request`.
-// resp, err := client.R().Execute(resty.GET, "http://httpbin.org/get")
-func (r *Request) Execute(method, url string) (*Response, error) {
- var addrs []*net.SRV
- var resp *Response
- var err error
-
- if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
- // No OnError hook here since this is a request validation error
- return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
- }
-
- if r.SRV != nil {
- _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
- if err != nil {
- r.client.onErrorHooks(r, nil, err)
- return nil, err
- }
- }
-
- r.Method = method
- r.URL = r.selectAddr(addrs, url, 0)
-
- if r.client.RetryCount == 0 {
- r.Attempt = 1
- resp, err = r.client.execute(r)
- r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
- return resp, unwrapNoRetryErr(err)
- }
-
- err = Backoff(
- func() (*Response, error) {
- r.Attempt++
-
- r.URL = r.selectAddr(addrs, url, r.Attempt)
-
- resp, err = r.client.execute(r)
- if err != nil {
- r.client.log.Errorf("%v, Attempt %v", err, r.Attempt)
- }
-
- return resp, err
- },
- Retries(r.client.RetryCount),
- WaitTime(r.client.RetryWaitTime),
- MaxWaitTime(r.client.RetryMaxWaitTime),
- RetryConditions(append(r.retryConditions, r.client.RetryConditions...)),
- RetryHooks(r.client.RetryHooks),
- )
-
- r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
-
- return resp, unwrapNoRetryErr(err)
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// SRVRecord struct
-//_______________________________________________________________________
-
-// SRVRecord struct holds the data to query the SRV record for the
-// following service.
-type SRVRecord struct {
- Service string
- Domain string
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Request Unexported methods
-//_______________________________________________________________________
-
-func (r *Request) fmtBodyString(sl int64) (body string) {
- body = "***** NO CONTENT *****"
- if !isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
- return
- }
-
- if _, ok := r.Body.(io.Reader); ok {
- body = "***** BODY IS io.Reader *****"
- return
- }
-
- // multipart or form-data
- if r.isMultiPart || r.isFormData {
- bodySize := int64(r.bodyBuf.Len())
- if bodySize > sl {
- body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
- return
- }
- body = r.bodyBuf.String()
- return
- }
-
- // request body data
- if r.Body == nil {
- return
- }
- var prtBodyBytes []byte
- var err error
-
- contentType := r.Header.Get(hdrContentTypeKey)
- kind := kindOf(r.Body)
- if canJSONMarshal(contentType, kind) {
- prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ")
- } else if IsXMLType(contentType) && (kind == reflect.Struct) {
- prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
- } else if b, ok := r.Body.(string); ok {
- if IsJSONType(contentType) {
- bodyBytes := []byte(b)
- out := acquireBuffer()
- defer releaseBuffer(out)
- if err = json.Indent(out, bodyBytes, "", " "); err == nil {
- prtBodyBytes = out.Bytes()
- }
- } else {
- body = b
- }
- } else if b, ok := r.Body.([]byte); ok {
- body = fmt.Sprintf("***** BODY IS byte(s) (size - %d) *****", len(b))
- return
- }
-
- if prtBodyBytes != nil && err == nil {
- body = string(prtBodyBytes)
- }
-
- if len(body) > 0 {
- bodySize := int64(len([]byte(body)))
- if bodySize > sl {
- body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
- }
- }
-
- return
-}
-
-func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
- if addrs == nil {
- return path
- }
-
- idx := attempt % len(addrs)
- domain := strings.TrimRight(addrs[idx].Target, ".")
- path = strings.TrimLeft(path, "/")
-
- return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
-}
-
-func (r *Request) initValuesMap() {
- if r.values == nil {
- r.values = make(map[string]interface{})
- }
-}
-
-var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
- buf := acquireBuffer()
- encoder := json.NewEncoder(buf)
- encoder.SetEscapeHTML(false)
- if err := encoder.Encode(v); err != nil {
- releaseBuffer(buf)
- return nil, err
- }
-
- return buf, nil
-}
diff --git a/vendor/github.com/go-resty/resty/v2/response.go b/vendor/github.com/go-resty/resty/v2/response.go
deleted file mode 100644
index 8ae0e10b..00000000
--- a/vendor/github.com/go-resty/resty/v2/response.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "strings"
- "time"
-)
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Response struct and methods
-//_______________________________________________________________________
-
-// Response struct holds response values of executed request.
-type Response struct {
- Request *Request
- RawResponse *http.Response
-
- body []byte
- size int64
- receivedAt time.Time
-}
-
-// Body method returns HTTP response as []byte array for the executed request.
-//
-// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
-func (r *Response) Body() []byte {
- if r.RawResponse == nil {
- return []byte{}
- }
- return r.body
-}
-
-// Status method returns the HTTP status string for the executed request.
-// Example: 200 OK
-func (r *Response) Status() string {
- if r.RawResponse == nil {
- return ""
- }
- return r.RawResponse.Status
-}
-
-// StatusCode method returns the HTTP status code for the executed request.
-// Example: 200
-func (r *Response) StatusCode() int {
- if r.RawResponse == nil {
- return 0
- }
- return r.RawResponse.StatusCode
-}
-
-// Proto method returns the HTTP response protocol used for the request.
-func (r *Response) Proto() string {
- if r.RawResponse == nil {
- return ""
- }
- return r.RawResponse.Proto
-}
-
-// Result method returns the response value as an object if it has one
-func (r *Response) Result() interface{} {
- return r.Request.Result
-}
-
-// Error method returns the error object if it has one
-func (r *Response) Error() interface{} {
- return r.Request.Error
-}
-
-// Header method returns the response headers
-func (r *Response) Header() http.Header {
- if r.RawResponse == nil {
- return http.Header{}
- }
- return r.RawResponse.Header
-}
-
-// Cookies method to access all the response cookies
-func (r *Response) Cookies() []*http.Cookie {
- if r.RawResponse == nil {
- return make([]*http.Cookie, 0)
- }
- return r.RawResponse.Cookies()
-}
-
-// String method returns the body of the server response as String.
-func (r *Response) String() string {
- if r.body == nil {
- return ""
- }
- return strings.TrimSpace(string(r.body))
-}
-
-// Time method returns the time of HTTP response time that from request we sent and received a request.
-//
-// See `Response.ReceivedAt` to know when client received response and see `Response.Request.Time` to know
-// when client sent a request.
-func (r *Response) Time() time.Duration {
- if r.Request.clientTrace != nil {
- return r.Request.TraceInfo().TotalTime
- }
- return r.receivedAt.Sub(r.Request.Time)
-}
-
-// ReceivedAt method returns when response got received from server for the request.
-func (r *Response) ReceivedAt() time.Time {
- return r.receivedAt
-}
-
-// Size method returns the HTTP response size in bytes. Ya, you can relay on HTTP `Content-Length` header,
-// however it won't be good for chucked transfer/compressed response. Since Resty calculates response size
-// at the client end. You will get actual size of the http response.
-func (r *Response) Size() int64 {
- return r.size
-}
-
-// RawBody method exposes the HTTP raw response body. Use this method in-conjunction with `SetDoNotParseResponse`
-// option otherwise you get an error as `read err: http: read on closed response body`.
-//
-// Do not forget to close the body, otherwise you might get into connection leaks, no connection reuse.
-// Basically you have taken over the control of response parsing from `Resty`.
-func (r *Response) RawBody() io.ReadCloser {
- if r.RawResponse == nil {
- return nil
- }
- return r.RawResponse.Body
-}
-
-// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
-func (r *Response) IsSuccess() bool {
- return r.StatusCode() > 199 && r.StatusCode() < 300
-}
-
-// IsError method returns true if HTTP status `code >= 400` otherwise false.
-func (r *Response) IsError() bool {
- return r.StatusCode() > 399
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Response Unexported methods
-//_______________________________________________________________________
-
-func (r *Response) setReceivedAt() {
- r.receivedAt = time.Now()
- if r.Request.clientTrace != nil {
- r.Request.clientTrace.endTime = r.receivedAt
- }
-}
-
-func (r *Response) fmtBodyString(sl int64) string {
- if r.body != nil {
- if int64(len(r.body)) > sl {
- return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
- }
- ct := r.Header().Get(hdrContentTypeKey)
- if IsJSONType(ct) {
- out := acquireBuffer()
- defer releaseBuffer(out)
- err := json.Indent(out, r.body, "", " ")
- if err != nil {
- return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
- }
- return out.String()
- }
- return r.String()
- }
-
- return "***** NO CONTENT *****"
-}
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
deleted file mode 100644
index 6f9c8b4c..00000000
--- a/vendor/github.com/go-resty/resty/v2/resty.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-// Package resty provides Simple HTTP and REST client library for Go.
-package resty
-
-import (
- "net"
- "net/http"
- "net/http/cookiejar"
-
- "golang.org/x/net/publicsuffix"
-)
-
-// Version # of resty
-const Version = "2.7.0"
-
-// New method creates a new Resty client.
-func New() *Client {
- cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
- return createClient(&http.Client{
- Jar: cookieJar,
- })
-}
-
-// NewWithClient method creates a new Resty client with given `http.Client`.
-func NewWithClient(hc *http.Client) *Client {
- return createClient(hc)
-}
-
-// NewWithLocalAddr method creates a new Resty client with given Local Address
-// to dial from.
-func NewWithLocalAddr(localAddr net.Addr) *Client {
- cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
- return createClient(&http.Client{
- Jar: cookieJar,
- Transport: createTransport(localAddr),
- })
-}
diff --git a/vendor/github.com/go-resty/resty/v2/retry.go b/vendor/github.com/go-resty/resty/v2/retry.go
deleted file mode 100644
index 00b8514a..00000000
--- a/vendor/github.com/go-resty/resty/v2/retry.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "context"
- "math"
- "math/rand"
- "sync"
- "time"
-)
-
-const (
- defaultMaxRetries = 3
- defaultWaitTime = time.Duration(100) * time.Millisecond
- defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
-)
-
-type (
- // Option is to create convenient retry options like wait time, max retries, etc.
- Option func(*Options)
-
- // RetryConditionFunc type is for retry condition function
- // input: non-nil Response OR request execution error
- RetryConditionFunc func(*Response, error) bool
-
- // OnRetryFunc is for side-effecting functions triggered on retry
- OnRetryFunc func(*Response, error)
-
- // RetryAfterFunc returns time to wait before retry
- // For example, it can parse HTTP Retry-After header
- // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
- // Non-nil error is returned if it is found that request is not retryable
- // (0, nil) is a special result means 'use default algorithm'
- RetryAfterFunc func(*Client, *Response) (time.Duration, error)
-
- // Options struct is used to hold retry settings.
- Options struct {
- maxRetries int
- waitTime time.Duration
- maxWaitTime time.Duration
- retryConditions []RetryConditionFunc
- retryHooks []OnRetryFunc
- }
-)
-
-// Retries sets the max number of retries
-func Retries(value int) Option {
- return func(o *Options) {
- o.maxRetries = value
- }
-}
-
-// WaitTime sets the default wait time to sleep between requests
-func WaitTime(value time.Duration) Option {
- return func(o *Options) {
- o.waitTime = value
- }
-}
-
-// MaxWaitTime sets the max wait time to sleep between requests
-func MaxWaitTime(value time.Duration) Option {
- return func(o *Options) {
- o.maxWaitTime = value
- }
-}
-
-// RetryConditions sets the conditions that will be checked for retry.
-func RetryConditions(conditions []RetryConditionFunc) Option {
- return func(o *Options) {
- o.retryConditions = conditions
- }
-}
-
-// RetryHooks sets the hooks that will be executed after each retry
-func RetryHooks(hooks []OnRetryFunc) Option {
- return func(o *Options) {
- o.retryHooks = hooks
- }
-}
-
-// Backoff retries with increasing timeout duration up until X amount of retries
-// (Default is 3 attempts, Override with option Retries(n))
-func Backoff(operation func() (*Response, error), options ...Option) error {
- // Defaults
- opts := Options{
- maxRetries: defaultMaxRetries,
- waitTime: defaultWaitTime,
- maxWaitTime: defaultMaxWaitTime,
- retryConditions: []RetryConditionFunc{},
- }
-
- for _, o := range options {
- o(&opts)
- }
-
- var (
- resp *Response
- err error
- )
-
- for attempt := 0; attempt <= opts.maxRetries; attempt++ {
- resp, err = operation()
- ctx := context.Background()
- if resp != nil && resp.Request.ctx != nil {
- ctx = resp.Request.ctx
- }
- if ctx.Err() != nil {
- return err
- }
-
- err1 := unwrapNoRetryErr(err) // raw error, it used for return users callback.
- needsRetry := err != nil && err == err1 // retry on a few operation errors by default
-
- for _, condition := range opts.retryConditions {
- needsRetry = condition(resp, err1)
- if needsRetry {
- break
- }
- }
-
- if !needsRetry {
- return err
- }
-
- for _, hook := range opts.retryHooks {
- hook(resp, err)
- }
-
- // Don't need to wait when no retries left.
- // Still run retry hooks even on last retry to keep compatibility.
- if attempt == opts.maxRetries {
- return err
- }
-
- waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
- if err2 != nil {
- if err == nil {
- err = err2
- }
- return err
- }
-
- select {
- case <-time.After(waitTime):
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-
- return err
-}
-
-func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
- const maxInt = 1<<31 - 1 // max int for arch 386
- if max < 0 {
- max = maxInt
- }
- if resp == nil {
- return jitterBackoff(min, max, attempt), nil
- }
-
- retryAfterFunc := resp.Request.client.RetryAfter
-
- // Check for custom callback
- if retryAfterFunc == nil {
- return jitterBackoff(min, max, attempt), nil
- }
-
- result, err := retryAfterFunc(resp.Request.client, resp)
- if err != nil {
- return 0, err // i.e. 'API quota exceeded'
- }
- if result == 0 {
- return jitterBackoff(min, max, attempt), nil
- }
- if result < 0 || max < result {
- result = max
- }
- if result < min {
- result = min
- }
- return result, nil
-}
-
-// Return capped exponential backoff with jitter
-// http://www.awsarchitectureblog.com/2015/03/backoff.html
-func jitterBackoff(min, max time.Duration, attempt int) time.Duration {
- base := float64(min)
- capLevel := float64(max)
-
- temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
- ri := time.Duration(temp / 2)
- result := randDuration(ri)
-
- if result < min {
- result = min
- }
-
- return result
-}
-
-var rnd = newRnd()
-var rndMu sync.Mutex
-
-func randDuration(center time.Duration) time.Duration {
- rndMu.Lock()
- defer rndMu.Unlock()
-
- var ri = int64(center)
- var jitter = rnd.Int63n(ri)
- return time.Duration(math.Abs(float64(ri + jitter)))
-}
-
-func newRnd() *rand.Rand {
- var seed = time.Now().UnixNano()
- var src = rand.NewSource(seed)
- return rand.New(src)
-}
diff --git a/vendor/github.com/go-resty/resty/v2/trace.go b/vendor/github.com/go-resty/resty/v2/trace.go
deleted file mode 100644
index 23cf7033..00000000
--- a/vendor/github.com/go-resty/resty/v2/trace.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "context"
- "crypto/tls"
- "net"
- "net/http/httptrace"
- "time"
-)
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// TraceInfo struct
-//_______________________________________________________________________
-
-// TraceInfo struct is used provide request trace info such as DNS lookup
-// duration, Connection obtain duration, Server processing duration, etc.
-//
-// Since v2.0.0
-type TraceInfo struct {
- // DNSLookup is a duration that transport took to perform
- // DNS lookup.
- DNSLookup time.Duration
-
- // ConnTime is a duration that took to obtain a successful connection.
- ConnTime time.Duration
-
- // TCPConnTime is a duration that took to obtain the TCP connection.
- TCPConnTime time.Duration
-
- // TLSHandshake is a duration that TLS handshake took place.
- TLSHandshake time.Duration
-
- // ServerTime is a duration that server took to respond first byte.
- ServerTime time.Duration
-
- // ResponseTime is a duration since first response byte from server to
- // request completion.
- ResponseTime time.Duration
-
- // TotalTime is a duration that total request took end-to-end.
- TotalTime time.Duration
-
- // IsConnReused is whether this connection has been previously
- // used for another HTTP request.
- IsConnReused bool
-
- // IsConnWasIdle is whether this connection was obtained from an
- // idle pool.
- IsConnWasIdle bool
-
- // ConnIdleTime is a duration how long the connection was previously
- // idle, if IsConnWasIdle is true.
- ConnIdleTime time.Duration
-
- // RequestAttempt is to represent the request attempt made during a Resty
- // request execution flow, including retry count.
- RequestAttempt int
-
- // RemoteAddr returns the remote network address.
- RemoteAddr net.Addr
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// ClientTrace struct and its methods
-//_______________________________________________________________________
-
-// tracer struct maps the `httptrace.ClientTrace` hooks into Fields
-// with same naming for easy understanding. Plus additional insights
-// Request.
-type clientTrace struct {
- getConn time.Time
- dnsStart time.Time
- dnsDone time.Time
- connectDone time.Time
- tlsHandshakeStart time.Time
- tlsHandshakeDone time.Time
- gotConn time.Time
- gotFirstResponseByte time.Time
- endTime time.Time
- gotConnInfo httptrace.GotConnInfo
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Trace unexported methods
-//_______________________________________________________________________
-
-func (t *clientTrace) createContext(ctx context.Context) context.Context {
- return httptrace.WithClientTrace(
- ctx,
- &httptrace.ClientTrace{
- DNSStart: func(_ httptrace.DNSStartInfo) {
- t.dnsStart = time.Now()
- },
- DNSDone: func(_ httptrace.DNSDoneInfo) {
- t.dnsDone = time.Now()
- },
- ConnectStart: func(_, _ string) {
- if t.dnsDone.IsZero() {
- t.dnsDone = time.Now()
- }
- if t.dnsStart.IsZero() {
- t.dnsStart = t.dnsDone
- }
- },
- ConnectDone: func(net, addr string, err error) {
- t.connectDone = time.Now()
- },
- GetConn: func(_ string) {
- t.getConn = time.Now()
- },
- GotConn: func(ci httptrace.GotConnInfo) {
- t.gotConn = time.Now()
- t.gotConnInfo = ci
- },
- GotFirstResponseByte: func() {
- t.gotFirstResponseByte = time.Now()
- },
- TLSHandshakeStart: func() {
- t.tlsHandshakeStart = time.Now()
- },
- TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
- t.tlsHandshakeDone = time.Now()
- },
- },
- )
-}
diff --git a/vendor/github.com/go-resty/resty/v2/transport.go b/vendor/github.com/go-resty/resty/v2/transport.go
deleted file mode 100644
index e15b48c5..00000000
--- a/vendor/github.com/go-resty/resty/v2/transport.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.13
-
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "net"
- "net/http"
- "runtime"
- "time"
-)
-
-func createTransport(localAddr net.Addr) *http.Transport {
- dialer := &net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }
- if localAddr != nil {
- dialer.LocalAddr = localAddr
- }
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: dialer.DialContext,
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
- }
-}
diff --git a/vendor/github.com/go-resty/resty/v2/transport112.go b/vendor/github.com/go-resty/resty/v2/transport112.go
deleted file mode 100644
index fbbbc591..00000000
--- a/vendor/github.com/go-resty/resty/v2/transport112.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build !go1.13
-
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "net"
- "net/http"
- "runtime"
- "time"
-)
-
-func createTransport(localAddr net.Addr) *http.Transport {
- dialer := &net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }
- if localAddr != nil {
- dialer.LocalAddr = localAddr
- }
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: dialer.DialContext,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
- }
-}
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
deleted file mode 100644
index 1d563bef..00000000
--- a/vendor/github.com/go-resty/resty/v2/util.go
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
-// resty source code and usage is governed by a MIT style
-// license that can be found in the LICENSE file.
-
-package resty
-
-import (
- "bytes"
- "fmt"
- "io"
- "log"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "sort"
- "strings"
- "sync"
-)
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Logger interface
-//_______________________________________________________________________
-
-// Logger interface is to abstract the logging from Resty. Gives control to
-// the Resty users, choice of the logger.
-type Logger interface {
- Errorf(format string, v ...interface{})
- Warnf(format string, v ...interface{})
- Debugf(format string, v ...interface{})
-}
-
-func createLogger() *logger {
- l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
- return l
-}
-
-var _ Logger = (*logger)(nil)
-
-type logger struct {
- l *log.Logger
-}
-
-func (l *logger) Errorf(format string, v ...interface{}) {
- l.output("ERROR RESTY "+format, v...)
-}
-
-func (l *logger) Warnf(format string, v ...interface{}) {
- l.output("WARN RESTY "+format, v...)
-}
-
-func (l *logger) Debugf(format string, v ...interface{}) {
- l.output("DEBUG RESTY "+format, v...)
-}
-
-func (l *logger) output(format string, v ...interface{}) {
- if len(v) == 0 {
- l.l.Print(format)
- return
- }
- l.l.Printf(format, v...)
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Package Helper methods
-//_______________________________________________________________________
-
-// IsStringEmpty method tells whether given string is empty or not
-func IsStringEmpty(str string) bool {
- return len(strings.TrimSpace(str)) == 0
-}
-
-// DetectContentType method is used to figure out `Request.Body` content type for request header
-func DetectContentType(body interface{}) string {
- contentType := plainTextType
- kind := kindOf(body)
- switch kind {
- case reflect.Struct, reflect.Map:
- contentType = jsonContentType
- case reflect.String:
- contentType = plainTextType
- default:
- if b, ok := body.([]byte); ok {
- contentType = http.DetectContentType(b)
- } else if kind == reflect.Slice {
- contentType = jsonContentType
- }
- }
-
- return contentType
-}
-
-// IsJSONType method is to check JSON content type or not
-func IsJSONType(ct string) bool {
- return jsonCheck.MatchString(ct)
-}
-
-// IsXMLType method is to check XML content type or not
-func IsXMLType(ct string) bool {
- return xmlCheck.MatchString(ct)
-}
-
-// Unmarshalc content into object from JSON or XML
-func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
- if IsJSONType(ct) {
- err = c.JSONUnmarshal(b, d)
- } else if IsXMLType(ct) {
- err = c.XMLUnmarshal(b, d)
- }
-
- return
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// RequestLog and ResponseLog type
-//_______________________________________________________________________
-
-// RequestLog struct is used to collected information from resty request
-// instance for debug logging. It sent to request log callback before resty
-// actually logs the information.
-type RequestLog struct {
- Header http.Header
- Body string
-}
-
-// ResponseLog struct is used to collected information from resty response
-// instance for debug logging. It sent to response log callback before resty
-// actually logs the information.
-type ResponseLog struct {
- Header http.Header
- Body string
-}
-
-//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
-// Package Unexported methods
-//_______________________________________________________________________
-
-// way to disable the HTML escape as opt-in
-func jsonMarshal(c *Client, r *Request, d interface{}) (*bytes.Buffer, error) {
- if !r.jsonEscapeHTML || !c.jsonEscapeHTML {
- return noescapeJSONMarshal(d)
- }
-
- data, err := c.JSONMarshal(d)
- if err != nil {
- return nil, err
- }
-
- buf := acquireBuffer()
- _, _ = buf.Write(data)
- return buf, nil
-}
-
-func firstNonEmpty(v ...string) string {
- for _, s := range v {
- if !IsStringEmpty(s) {
- return s
- }
- }
- return ""
-}
-
-var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
-
-func escapeQuotes(s string) string {
- return quoteEscaper.Replace(s)
-}
-
-func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
- hdr := make(textproto.MIMEHeader)
-
- var contentDispositionValue string
- if IsStringEmpty(fileName) {
- contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
- } else {
- contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- param, escapeQuotes(fileName))
- }
- hdr.Set("Content-Disposition", contentDispositionValue)
-
- if !IsStringEmpty(contentType) {
- hdr.Set(hdrContentTypeKey, contentType)
- }
- return hdr
-}
-
-func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
- partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
- if err != nil {
- return err
- }
-
- _, err = io.Copy(partWriter, mf.Reader)
- return err
-}
-
-func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
- // Auto detect actual multipart content type
- cbuf := make([]byte, 512)
- size, err := r.Read(cbuf)
- if err != nil && err != io.EOF {
- return err
- }
-
- partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
- if err != nil {
- return err
- }
-
- if _, err = partWriter.Write(cbuf[:size]); err != nil {
- return err
- }
-
- _, err = io.Copy(partWriter, r)
- return err
-}
-
-func addFile(w *multipart.Writer, fieldName, path string) error {
- file, err := os.Open(path)
- if err != nil {
- return err
- }
- defer closeq(file)
- return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
-}
-
-func addFileReader(w *multipart.Writer, f *File) error {
- return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
-}
-
-func getPointer(v interface{}) interface{} {
- vv := valueOf(v)
- if vv.Kind() == reflect.Ptr {
- return v
- }
- return reflect.New(vv.Type()).Interface()
-}
-
-func isPayloadSupported(m string, allowMethodGet bool) bool {
- return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
-}
-
-func typeOf(i interface{}) reflect.Type {
- return indirect(valueOf(i)).Type()
-}
-
-func valueOf(i interface{}) reflect.Value {
- return reflect.ValueOf(i)
-}
-
-func indirect(v reflect.Value) reflect.Value {
- return reflect.Indirect(v)
-}
-
-func kindOf(v interface{}) reflect.Kind {
- return typeOf(v).Kind()
-}
-
-func createDirectory(dir string) (err error) {
- if _, err = os.Stat(dir); err != nil {
- if os.IsNotExist(err) {
- if err = os.MkdirAll(dir, 0755); err != nil {
- return
- }
- }
- }
- return
-}
-
-func canJSONMarshal(contentType string, kind reflect.Kind) bool {
- return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
-}
-
-func functionName(i interface{}) string {
- return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
-}
-
-func acquireBuffer() *bytes.Buffer {
- return bufPool.Get().(*bytes.Buffer)
-}
-
-func releaseBuffer(buf *bytes.Buffer) {
- if buf != nil {
- buf.Reset()
- bufPool.Put(buf)
- }
-}
-
-// requestBodyReleaser wraps requests's body and implements custom Close for it.
-// The Close method closes original body and releases request body back to sync.Pool.
-type requestBodyReleaser struct {
- releaseOnce sync.Once
- reqBuf *bytes.Buffer
- io.ReadCloser
-}
-
-func newRequestBodyReleaser(respBody io.ReadCloser, reqBuf *bytes.Buffer) io.ReadCloser {
- if reqBuf == nil {
- return respBody
- }
-
- return &requestBodyReleaser{
- reqBuf: reqBuf,
- ReadCloser: respBody,
- }
-}
-
-func (rr *requestBodyReleaser) Close() error {
- err := rr.ReadCloser.Close()
- rr.releaseOnce.Do(func() {
- releaseBuffer(rr.reqBuf)
- })
-
- return err
-}
-
-func closeq(v interface{}) {
- if c, ok := v.(io.Closer); ok {
- silently(c.Close())
- }
-}
-
-func silently(_ ...interface{}) {}
-
-func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
- str := make([]string, 0, len(hdrs))
- for _, k := range sortHeaderKeys(hdrs) {
- var v string
- if k == "Cookie" {
- cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
- if c.GetClient().Jar != nil {
- for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
- if cv != "" {
- cv = cv + "; " + c.String()
- } else {
- cv = c.String()
- }
- }
- }
- v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
- } else {
- v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
- }
- if v != "" {
- str = append(str, "\t"+v)
- }
- }
- return strings.Join(str, "\n")
-}
-
-func sortHeaderKeys(hdrs http.Header) []string {
- keys := make([]string, 0, len(hdrs))
- for key := range hdrs {
- keys = append(keys, key)
- }
- sort.Strings(keys)
- return keys
-}
-
-func copyHeaders(hdrs http.Header) http.Header {
- nh := http.Header{}
- for k, v := range hdrs {
- nh[k] = v
- }
- return nh
-}
-
-type noRetryErr struct {
- err error
-}
-
-func (e *noRetryErr) Error() string {
- return e.err.Error()
-}
-
-func wrapNoRetryErr(err error) error {
- if err != nil {
- err = &noRetryErr{err: err}
- }
- return err
-}
-
-func unwrapNoRetryErr(err error) error {
- if e, ok := err.(*noRetryErr); ok {
- err = e.err
- }
- return err
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 05132751..ec346e20 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -13,29 +13,41 @@
Aaron Hopkins
Achille Roussel
+Aidan
Alex Snast
Alexey Palazhchenko
Andrew Reid
Animesh Ray
Arne Hormann
Ariel Mashraki
+Artur Melanchyk
Asta Xie
+B Lamarche
+Bes Dollma
+Bogdan Constantinescu
+Brad Higgins
+Brian Hendriks
Bulat Gaifullin
Caine Jette
Carlos Nieto
Chris Kirkland
Chris Moos
Craig Wilson
+Daemonxiao <735462752 at qq.com>
Daniel Montoya
Daniel Nichter
Daniël van Eeden
Dave Protasowski
+Diego Dupin
+Dirkjan Bussink
DisposaBoy
Egor Smolyakov
Erwan Martin
+Evan Elias
Evan Shaw
Frederick Mayle
Gustavo Kristic
+Gusted
Hajime Nakagami
Hanno Braun
Henri Yandell
@@ -45,13 +57,18 @@ ICHINOSE Shogo
Ilia Cimpoes
INADA Naoki
Jacek Szwec
+Jakub Adamus
James Harr
Janek Vedock
+Jason Ng
+Jean-Yves Pellé
Jeff Hodges
Jeffrey Charles
+Jennifer Purevsuren
Jerome Meyer
Jiajia Zhong
Jian Zhen
+Joe Mann
Joshua Prunier
Julien Lefevre
Julien Schmidt
@@ -72,17 +89,23 @@ Lunny Xiao
Luke Scott
Maciej Zimnoch
Michael Woolnough
+Nao Yokotsuka
Nathanial Murphy
Nicola Peduzzi
+Oliver Bone
Olivier Mengué
oscarzhao
Paul Bonser
+Paulius Lozys
Peter Schultz
+Phil Porada
+Minh Quang
Rebecca Chin
Reed Allman
Richard Wilkes
Robert Russell
Runrioter Wung
+Samantha Frank
Santhosh Kumar Tekuri
Sho Iizuka
Sho Ikeda
@@ -93,6 +116,7 @@ Stan Putrya
Stanley Gunawan
Steven Hartland
Tan Jinhua <312841925 at qq.com>
+Tetsuro Aoki
Thomas Wodarek
Tim Ruffles
Tom Jenkinson
@@ -102,6 +126,7 @@ Xiangyu Hu
Xiaobing Jiang
Xiuming Chen
Xuehong Chan
+Zhang Xiang
Zhenye Xie
Zhixin Wen
Ziheng Lyu
@@ -110,15 +135,21 @@ Ziheng Lyu
Barracuda Networks, Inc.
Counting Ltd.
+Defined Networking Inc.
DigitalOcean Inc.
+Dolthub Inc.
dyves labs AG
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
+Microsoft Corp.
Multiplay Ltd.
Percona LLC
+PingCAP Inc.
Pivotal Inc.
+Shattered Silicon Ltd.
Stripe Inc.
+ThousandEyes
Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 77024a82..75674b60 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,110 @@
+# Changelog
+
+## v1.9.3 (2025-06-13)
+
+* `tx.Commit()` and `tx.Rollback()` returned `ErrInvalidConn` always.
+ Now they return cached real error if present. (#1690)
+
+* Optimize reading small resultsets to fix performance regression
+ introduced by compression protocol support. (#1707)
+
+* Fix `db.Ping()` on compressed connection. (#1723)
+
+
+## v1.9.2 (2025-04-07)
+
+v1.9.2 is a re-release of v1.9.1 due to a release process issue; no changes were made to the content.
+
+
+## v1.9.1 (2025-03-21)
+
+### Major Changes
+
+* Add Charset() option. (#1679)
+
+### Bugfixes
+
+* go.mod: fix go version format (#1682)
+* Fix FormatDSN missing ConnectionAttributes (#1619)
+
+## v1.9.0 (2025-02-18)
+
+### Major Changes
+
+- Implement zlib compression. (#1487)
+- Supported Go version is updated to Go 1.21+. (#1639)
+- Add support for VECTOR type introduced in MySQL 9.0. (#1609)
+- Config object can have custom dial function. (#1527)
+
+### Bugfixes
+
+- Fix auth errors when username/password are too long. (#1625)
+- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640)
+- Fix auth switch request handling. (#1666)
+
+### Other changes
+
+- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589)
+- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641)
+- Use `strconv.Atoi` to parse max_allowed_packet. (#1661)
+- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660)
+
+
+## Version 1.8.1 (2024-03-26)
+
+Bugfixes:
+
+- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
+
+## Version 1.8.0 (2024-03-09)
+
+Major Changes:
+
+- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
+ - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
+ - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4.
+ - If you specify charset, go-mysql-driver sends `SET NAMES `. This uses the server's default collation for ``.
+ - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
+- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
+ - This is backward incompatible in rare case. Check your DSN.
+- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
+ - Use Go 1.18+
+- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
+ - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion.
+ - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
+ - This confused users because most user doesn't know when text/binary protocol used.
+ - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable.
+- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now.
+ - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
+ - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
+
+
+Other changes:
+
+- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
+- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
+- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
+- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
+- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
+- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
+- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
+- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
+- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
+- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
+
+## Version 1.7.1 (2023-04-25)
+
+Changes:
+
+ - bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
+ - Add go1.20 and mariadb10.11 to the testing matrix (#1403)
+ - Increase default maxAllowedPacket size. (#1411)
+
+Bugfixes:
+
+ - Use SET syntax as specified in the MySQL documentation (#1402)
+
+
## Version 1.7 (2022-11-29)
Changes:
@@ -149,7 +256,7 @@ New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
@@ -193,7 +300,7 @@ Changes:
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
New Features:
@@ -241,7 +348,7 @@ Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
+ - Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index 25de2e5a..da4593cc 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -38,17 +38,26 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
+ * Supports zlib compression.
## Requirements
- * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
+* MySQL (5.7+) and MariaDB (10.5+) are supported.
+* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
+ * Do not ask questions about TiDB in our issue tracker or forum.
+ * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
+ * [Forum](https://ask.pingcap.com/)
+* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
+ * Maintainers won't support them. Do not expect issues to be investigated and resolved by maintainers.
+ * Investigate issues yourself and please send a pull request to fix it.
---------------------------------------
## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get -u github.com/go-sql-driver/mysql
+go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -114,6 +123,12 @@ This has the same effect as an empty DSN string:
```
+`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
+
+```
+/dbname%2Fwithslash
+```
+
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
@@ -121,7 +136,7 @@ Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
@@ -145,7 +160,7 @@ Default: false
```
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
##### `allowCleartextPasswords`
@@ -194,10 +209,9 @@ Valid Values:
Default: none
```
-Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
+See also [Unicode Support](#unicode-support).
##### `checkConnLiveness`
@@ -226,6 +240,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+See also [Unicode Support](#unicode-support).
##### `clientFoundRows`
@@ -253,6 +268,16 @@ SELECT u.id FROM users as u
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+##### `compress`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Toggles zlib compression. false by default.
+
##### `interpolateParams`
```
@@ -279,13 +304,22 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `timeTruncate`
+
+```
+Type: duration
+Default: 0
+```
+
+[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `maxAllowedPacket`
```
Type: decimal number
-Default: 4194304
+Default: 64*1024*1024
```
-Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
##### `multiStatements`
@@ -295,9 +329,25 @@ Valid Values: true, false
Default: false
```
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+Allow multiple statements in one query. This can be used to batch multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get the result of the second and subsequent queries.
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
+When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly.
+
+It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
+
+```go
+conn, _ := db.Conn(ctx)
+conn.Raw(func(conn any) error {
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ UPDATE point SET x = 1 WHERE y = 2;
+ UPDATE point SET x = 2 WHERE y = 3;
+ `, nil)
+ // Both slices have 2 elements.
+ log.Print(res.(mysql.Result).AllRowsAffected())
+ log.Print(res.(mysql.Result).AllLastInsertIds())
+})
+```
##### `parseTime`
@@ -393,6 +443,15 @@ Default: 0
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+##### `connectionAttributes`
+
+```
+Type: comma-delimited string of user-defined "key:value" pairs
+Valid Values: (:,:,...)
+Default: none
+```
+
+[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
##### System Variables
@@ -465,12 +524,15 @@ user:password@/
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+> [!IMPORTANT]
+> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
+
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
@@ -478,7 +540,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
@@ -496,9 +558,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+- When only the `charset` is specified, the `SET NAMES ` query is sent and the server's default collation is used.
+- When both the `charset` and `collation` are specified, the `SET NAMES COLLATE ` query is sent.
+- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
deleted file mode 100644
index 1b7e19f3..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build go1.19
-// +build go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-type atomicBool = atomic.Bool
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
deleted file mode 100644
index 2e9a7f0b..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build !go1.19
-// +build !go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-// atomicBool is an implementation of atomic.Bool for older version of Go.
-// it is a wrapper around uint32 for usage as a boolean value with
-// atomic access.
-type atomicBool struct {
- _ noCopy
- value uint32
-}
-
-// Load returns whether the current boolean value is true
-func (ab *atomicBool) Load() bool {
- return atomic.LoadUint32(&ab.value) > 0
-}
-
-// Store sets the value of the bool regardless of the previous value
-func (ab *atomicBool) Store(value bool) {
- if value {
- atomic.StoreUint32(&ab.value, 1)
- } else {
- atomic.StoreUint32(&ab.value, 0)
- }
-}
-
-// Swap sets the value of the bool and returns the old value.
-func (ab *atomicBool) Swap(value bool) bool {
- if value {
- return atomic.SwapUint32(&ab.value, 1) > 0
- }
- return atomic.SwapUint32(&ab.value, 0) > 0
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
index 1ff203e5..74e1bd03 100644
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -13,10 +13,13 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
+
+ "filippo.io/edwards25519"
)
// server pub keys registry
@@ -33,7 +36,7 @@ var (
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
-// data, err := ioutil.ReadFile("mykey.pem")
+// data, err := os.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
+// authEd25519 does ed25519 authentication used by MariaDB.
+func authEd25519(scramble []byte, password string) ([]byte, error) {
+ // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
+ // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
+ h := sha512.Sum512([]byte(password))
+
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ return nil, err
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ mh := sha512.New()
+ mh.Write(h[32:])
+ mh.Write(scramble)
+ messageDigest := mh.Sum(nil)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(A.Bytes())
+ kh.Write(scramble)
+ hramDigest := kh.Sum(nil)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ S := k.MultiplyAdd(k, s, r)
+
+ return append(R.Bytes(), S.Bytes()...), nil
+}
+
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
@@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
+ case "client_ed25519":
+ if len(authData) != 32 {
+ return nil, ErrMalformPkt
+ }
+ return authEd25519(authData, mc.cfg.Passwd)
+
default:
- errLog.Print("unknown auth plugin:", plugin)
+ mc.log("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
@@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
switch plugin {
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
@@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
+ if err = mc.resultUnchanged().readResultOK(); err == nil {
return nil // auth successful
}
@@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
}
if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
}
// parse public key
block, rest := pem.Decode(data[1:])
if block == nil {
- return fmt.Errorf("No Pem data found, data: %s", rest)
+ return fmt.Errorf("no pem data found, data: %s", rest)
}
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
@@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
default:
return ErrMalformPkt
@@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
if err != nil {
return err
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
}
default:
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
index 0774c5c8..f895e87b 100644
--- a/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -10,54 +10,47 @@ package mysql
import (
"io"
- "net"
- "time"
)
const defaultBufSize = 4096
const maxCachedBufSize = 256 * 1024
+// readerFunc is a function that is compatible with io.Reader.
+// We use this function type instead of io.Reader because we want to
+// just pass mc.readWithTimeout.
+type readerFunc func([]byte) (int, error)
+
// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
- buf []byte // buf is a byte buffer who's length and capacity are equal.
- nc net.Conn
- idx int
- length int
- timeout time.Duration
- dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
- flipcnt uint // flipccnt is the current buffer counter for double-buffering
+ buf []byte // read buffer.
+ cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
}
// newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
- fg := make([]byte, defaultBufSize)
+func newBuffer() buffer {
return buffer{
- buf: fg,
- nc: nc,
- dbuf: [2][]byte{fg, nil},
+ cachedBuf: make([]byte, defaultBufSize),
}
}
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
- b.flipcnt += 1
+// busy returns true if the read buffer is not empty.
+func (b *buffer) busy() bool {
+ return len(b.buf) > 0
}
-// fill reads into the buffer until at least _need_ bytes are in it
-func (b *buffer) fill(need int) error {
- n := b.length
- // fill data into its double-buffering target: if we've called
- // flip on this buffer, we'll be copying to the background buffer,
- // and then filling it with network data; otherwise we'll just move
- // the contents of the current buffer to the front before filling it
- dest := b.dbuf[b.flipcnt&1]
+// len returns how many bytes in the read buffer.
+func (b *buffer) len() int {
+ return len(b.buf)
+}
+
+// fill reads into the read buffer until at least _need_ bytes are in it.
+func (b *buffer) fill(need int, r readerFunc) error {
+ // we'll move the contents of the current buffer to dest before filling it.
+ dest := b.cachedBuf
// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
@@ -67,64 +60,41 @@ func (b *buffer) fill(need int) error {
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
- b.dbuf[b.flipcnt&1] = dest
+ b.cachedBuf = dest
}
}
- // if we're filling the fg buffer, move the existing data to the start of it.
- // if we're filling the bg buffer, copy over the data
- if n > 0 {
- copy(dest[:n], b.buf[b.idx:])
- }
-
- b.buf = dest
- b.idx = 0
+ // move the existing data to the start of the buffer.
+ n := len(b.buf)
+ copy(dest[:n], b.buf)
for {
- if b.timeout > 0 {
- if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
- return err
- }
- }
-
- nn, err := b.nc.Read(b.buf[n:])
+ nn, err := r(dest[n:])
n += nn
- switch err {
- case nil:
- if n < need {
- continue
- }
- b.length = n
- return nil
-
- case io.EOF:
- if n >= need {
- b.length = n
- return nil
- }
- return io.ErrUnexpectedEOF
-
- default:
- return err
+ if err == nil && n < need {
+ continue
}
+
+ b.buf = dest[:n]
+
+ if err == io.EOF {
+ if n < need {
+ err = io.ErrUnexpectedEOF
+ } else {
+ err = nil
+ }
+ }
+ return err
}
}
// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
-func (b *buffer) readNext(need int) ([]byte, error) {
- if b.length < need {
- // refill
- if err := b.fill(need); err != nil {
- return nil, err
- }
- }
-
- offset := b.idx
- b.idx += need
- b.length -= need
- return b.buf[offset:b.idx], nil
+func (b *buffer) readNext(need int) []byte {
+ data := b.buf[:need:need]
+ b.buf = b.buf[need:]
+ return data
}
// takeBuffer returns a buffer with the requested size.
@@ -132,18 +102,18 @@ func (b *buffer) readNext(need int) ([]byte, error) {
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
// test (cheap) general case first
- if length <= cap(b.buf) {
- return b.buf[:length], nil
+ if length <= len(b.cachedBuf) {
+ return b.cachedBuf[:length], nil
}
- if length < maxPacketSize {
- b.buf = make([]byte, length)
- return b.buf, nil
+ if length < maxCachedBufSize {
+ b.cachedBuf = make([]byte, length)
+ return b.cachedBuf, nil
}
// buffer is larger than we want to store.
@@ -154,10 +124,10 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
- return b.buf[:length], nil
+ return b.cachedBuf[:length], nil
}
// takeCompleteBuffer returns the complete existing buffer.
@@ -165,18 +135,15 @@ func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
- if b.length > 0 {
+ if b.busy() {
return nil, ErrBusyBuffer
}
- return b.buf, nil
+ return b.cachedBuf, nil
}
// store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
- if b.length > 0 {
- return ErrBusyBuffer
- } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
- b.buf = buf[:cap(buf)]
+func (b *buffer) store(buf []byte) {
+ if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
+ b.cachedBuf = buf[:cap(buf)]
}
- return nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 295bfbe5..29b1aa43 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -8,8 +8,8 @@
package mysql
-const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
+const defaultCollationID = 45 // utf8mb4_general_ci
+const binaryCollationID = 63
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/vendor/github.com/go-sql-driver/mysql/compress.go b/vendor/github.com/go-sql-driver/mysql/compress.go
new file mode 100644
index 00000000..38bfa000
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/compress.go
@@ -0,0 +1,213 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2024 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "compress/zlib"
+ "fmt"
+ "io"
+ "sync"
+)
+
+var (
+ zrPool *sync.Pool // Do not use directly. Use zDecompress() instead.
+ zwPool *sync.Pool // Do not use directly. Use zCompress() instead.
+)
+
+func init() {
+ zrPool = &sync.Pool{
+ New: func() any { return nil },
+ }
+ zwPool = &sync.Pool{
+ New: func() any {
+ zw, err := zlib.NewWriterLevel(new(bytes.Buffer), 2)
+ if err != nil {
+ panic(err) // compress/zlib return non-nil error only if level is invalid
+ }
+ return zw
+ },
+ }
+}
+
+func zDecompress(src []byte, dst *bytes.Buffer) (int, error) {
+ br := bytes.NewReader(src)
+ var zr io.ReadCloser
+ var err error
+
+ if a := zrPool.Get(); a == nil {
+ if zr, err = zlib.NewReader(br); err != nil {
+ return 0, err
+ }
+ } else {
+ zr = a.(io.ReadCloser)
+ if err := zr.(zlib.Resetter).Reset(br, nil); err != nil {
+ return 0, err
+ }
+ }
+
+ n, _ := dst.ReadFrom(zr) // ignore err because zr.Close() will return it again.
+ err = zr.Close() // zr.Close() may return checksum error.
+ zrPool.Put(zr)
+ return int(n), err
+}
+
+func zCompress(src []byte, dst io.Writer) error {
+ zw := zwPool.Get().(*zlib.Writer)
+ zw.Reset(dst)
+ if _, err := zw.Write(src); err != nil {
+ return err
+ }
+ err := zw.Close()
+ zwPool.Put(zw)
+ return err
+}
+
+type compIO struct {
+ mc *mysqlConn
+ buff bytes.Buffer
+}
+
+func newCompIO(mc *mysqlConn) *compIO {
+ return &compIO{
+ mc: mc,
+ }
+}
+
+func (c *compIO) reset() {
+ c.buff.Reset()
+}
+
+func (c *compIO) readNext(need int) ([]byte, error) {
+ for c.buff.Len() < need {
+ if err := c.readCompressedPacket(); err != nil {
+ return nil, err
+ }
+ }
+ data := c.buff.Next(need)
+ return data[:need:need], nil // prevent caller writes into c.buff
+}
+
+func (c *compIO) readCompressedPacket() error {
+ header, err := c.mc.readNext(7)
+ if err != nil {
+ return err
+ }
+ _ = header[6] // bounds check hint to compiler; guaranteed by readNext
+
+ // compressed header structure
+ comprLength := getUint24(header[0:3])
+ compressionSequence := header[3]
+ uncompressedLength := getUint24(header[4:7])
+ if debug {
+ fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n",
+ comprLength, uncompressedLength, compressionSequence, c.mc.sequence)
+ }
+ // Do not return ErrPktSync here.
+ // Server may return error packet (e.g. 1153 Got a packet bigger than 'max_allowed_packet' bytes)
+ // before receiving all packets from client. In this case, seqnr is younger than expected.
+ // NOTE: Both of mariadbclient and mysqlclient do not check seqnr. Only server checks it.
+ if debug && compressionSequence != c.mc.compressSequence {
+ fmt.Printf("WARN: unexpected compress seq nr: expected %v, got %v",
+ c.mc.compressSequence, compressionSequence)
+ }
+ c.mc.compressSequence = compressionSequence + 1
+
+ comprData, err := c.mc.readNext(comprLength)
+ if err != nil {
+ return err
+ }
+
+ // if payload is uncompressed, its length will be specified as zero, and its
+ // true length is contained in comprLength
+ if uncompressedLength == 0 {
+ c.buff.Write(comprData)
+ return nil
+ }
+
+ // use existing capacity in bytesBuf if possible
+ c.buff.Grow(uncompressedLength)
+ nread, err := zDecompress(comprData, &c.buff)
+ if err != nil {
+ return err
+ }
+ if nread != uncompressedLength {
+ return fmt.Errorf("invalid compressed packet: uncompressed length in header is %d, actual %d",
+ uncompressedLength, nread)
+ }
+ return nil
+}
+
+const minCompressLength = 150
+const maxPayloadLen = maxPacketSize - 4
+
+// writePackets sends one or some packets with compression.
+// Use this instead of mc.netConn.Write() when mc.compress is true.
+func (c *compIO) writePackets(packets []byte) (int, error) {
+ totalBytes := len(packets)
+ blankHeader := make([]byte, 7)
+ buf := &c.buff
+
+ for len(packets) > 0 {
+ payloadLen := min(maxPayloadLen, len(packets))
+ payload := packets[:payloadLen]
+ uncompressedLen := payloadLen
+
+ buf.Reset()
+ buf.Write(blankHeader) // Buffer.Write() never returns error
+
+ // If payload is less than minCompressLength, don't compress.
+ if uncompressedLen < minCompressLength {
+ buf.Write(payload)
+ uncompressedLen = 0
+ } else {
+ err := zCompress(payload, buf)
+ if debug && err != nil {
+ fmt.Printf("zCompress error: %v", err)
+ }
+ // do not compress if compressed data is larger than uncompressed data
+ // The 7-byte header is intentionally omitted from buf here; zCompress must save more than 7 bytes to be worthwhile.
+ if err != nil || buf.Len() >= uncompressedLen {
+ buf.Reset()
+ buf.Write(blankHeader)
+ buf.Write(payload)
+ uncompressedLen = 0
+ }
+ }
+
+ if n, err := c.writeCompressedPacket(buf.Bytes(), uncompressedLen); err != nil {
+ // To allow returning ErrBadConn when sending really 0 bytes, we sum
+ // up compressed bytes that is returned by underlying Write().
+ return totalBytes - len(packets) + n, err
+ }
+ packets = packets[payloadLen:]
+ }
+
+ return totalBytes, nil
+}
+
+// writeCompressedPacket writes a compressed packet with header.
+// data should start with 7 size space for header followed by payload.
+func (c *compIO) writeCompressedPacket(data []byte, uncompressedLen int) (int, error) {
+ mc := c.mc
+ comprLength := len(data) - 7
+ if debug {
+ fmt.Printf(
+ "writeCompressedPacket: comprLength=%v, uncompressedLen=%v, seq=%v\n",
+ comprLength, uncompressedLen, mc.compressSequence)
+ }
+
+ // compression header
+ putUint24(data[0:3], comprLength)
+ data[3] = mc.compressSequence
+ putUint24(data[4:7], uncompressedLen)
+
+ mc.compressSequence++
+ return mc.writeWithTimeout(data)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index 9539077c..3e455a3f 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -13,28 +13,32 @@ import (
"database/sql"
"database/sql/driver"
"encoding/json"
+ "fmt"
"io"
"net"
+ "runtime"
"strconv"
"strings"
+ "sync/atomic"
"time"
)
type mysqlConn struct {
buf buffer
netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ result mysqlResult // managed by clearResult() and handleOkPacket().
+ compIO *compIO
cfg *Config
+ connector *connector
maxAllowedPacket int
maxWriteSize int
- writeTimeout time.Duration
flags clientFlag
status statusFlag
sequence uint8
+ compressSequence uint8
parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
+ compress bool
// for context support (Go 1.8+)
watching bool
@@ -42,61 +46,92 @@ type mysqlConn struct {
closech chan struct{}
finished chan<- struct{}
canceled atomicError // set non-nil if conn is canceled
- closed atomicBool // set when conn is closed, before closech is closed
+ closed atomic.Bool // set when conn is closed, before closech is closed
+}
+
+// Helper function to call per-connection logger.
+func (mc *mysqlConn) log(v ...any) {
+ _, filename, lineno, ok := runtime.Caller(1)
+ if ok {
+ pos := strings.LastIndexByte(filename, '/')
+ if pos != -1 {
+ filename = filename[pos+1:]
+ }
+ prefix := fmt.Sprintf("%s:%d ", filename, lineno)
+ v = append([]any{prefix}, v...)
+ }
+
+ mc.cfg.Logger.Print(v...)
+}
+
+func (mc *mysqlConn) readWithTimeout(b []byte) (int, error) {
+ to := mc.cfg.ReadTimeout
+ if to > 0 {
+ if err := mc.netConn.SetReadDeadline(time.Now().Add(to)); err != nil {
+ return 0, err
+ }
+ }
+ return mc.netConn.Read(b)
+}
+
+func (mc *mysqlConn) writeWithTimeout(b []byte) (int, error) {
+ to := mc.cfg.WriteTimeout
+ if to > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(to)); err != nil {
+ return 0, err
+ }
+ }
+ return mc.netConn.Write(b)
+}
+
+func (mc *mysqlConn) resetSequence() {
+ mc.sequence = 0
+ mc.compressSequence = 0
+}
+
+// syncSequence must be called when finished writing some packet and before start reading.
+func (mc *mysqlConn) syncSequence() {
+	// Syncs compressSequence to sequence.
+ // This is not documented but done in `net_flush()` in MySQL and MariaDB.
+ // https://github.com/mariadb-corporation/mariadb-connector-c/blob/8228164f850b12353da24df1b93a1e53cc5e85e9/libmariadb/ma_net.c#L170-L171
+ // https://github.com/mysql/mysql-server/blob/824e2b4064053f7daf17d7f3f84b7a3ed92e5fb4/sql-common/net_serv.cc#L293
+ if mc.compress {
+ mc.sequence = mc.compressSequence
+ mc.compIO.reset()
+ }
}
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
- for param, val := range mc.cfg.Params {
- switch param {
- // Charset: character_set_connection, character_set_client, character_set_results
- case "charset":
- charsets := strings.Split(val, ",")
- for i := range charsets {
- // ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
- if err == nil {
- break
- }
- }
- if err != nil {
- return
- }
- // Other system vars accumulated in a single SET command
- default:
- if cmdSet.Len() == 0 {
- // Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
- cmdSet.WriteString("SET ")
- } else {
- cmdSet.WriteByte(',')
- }
- cmdSet.WriteString(param)
- cmdSet.WriteByte('=')
- cmdSet.WriteString(val)
+ for param, val := range mc.cfg.Params {
+ if cmdSet.Len() == 0 {
+ // Heuristic: 29 chars for each other key=value to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteString(", ")
}
+ cmdSet.WriteString(param)
+ cmdSet.WriteString(" = ")
+ cmdSet.WriteString(val)
}
if cmdSet.Len() > 0 {
err = mc.exec(cmdSet.String())
- if err != nil {
- return
- }
}
return
}
+// markBadConn replaces errBadConnNoWrite with driver.ErrBadConn.
+// This function is used to return driver.ErrBadConn only when safe to retry.
func (mc *mysqlConn) markBadConn(err error) error {
- if mc == nil {
- return err
+ if err == errBadConnNoWrite {
+ return driver.ErrBadConn
}
- if err != errBadConnNoWrite {
- return err
- }
- return driver.ErrBadConn
+ return err
}
func (mc *mysqlConn) Begin() (driver.Tx, error) {
@@ -105,7 +140,6 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@@ -126,12 +160,16 @@ func (mc *mysqlConn) Close() (err error) {
if !mc.closed.Load() {
err = mc.writeCommandPacket(comQuit)
}
-
- mc.cleanup()
-
+ mc.close()
return
}
+// close closes the network connection and clears results without sending COM_QUIT.
+func (mc *mysqlConn) close() {
+ mc.cleanup()
+ mc.clearResult()
+}
+
// Closes the network connection and unsets internal variables. Do not call this
// function after successfully authentication, call Close instead. This function
// is called before auth or on auth failure because MySQL will have already
@@ -143,12 +181,16 @@ func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
close(mc.closech)
- if mc.netConn == nil {
+ conn := mc.rawConn
+ if conn == nil {
return
}
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ if err := conn.Close(); err != nil {
+ mc.log("closing connection:", err)
}
+ // This function can be called from multiple goroutines.
+	// So we cannot call mc.clearResult() here.
+ // Caller should do it if they are in safe goroutine.
}
func (mc *mysqlConn) error() error {
@@ -163,14 +205,13 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- errLog.Print(err)
+ mc.log(err)
return nil, driver.ErrBadConn
}
@@ -204,8 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return "", ErrInvalidConn
+ mc.cleanup()
+ // interpolateParams would be called before sending any query.
+		// So it's safe to retry.
+ return "", driver.ErrBadConn
}
buf = buf[:0]
argPos := 0
@@ -246,7 +289,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf = append(buf, "'0000-00-00'"...)
} else {
buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return "", err
}
@@ -296,7 +339,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -310,28 +352,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
}
query = prepared
}
- mc.affectedRows = 0
- mc.insertId = 0
err := mc.exec(query)
if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ copied := mc.result
+ return &copied, err
}
return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
+ handleOk := mc.clearResult()
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return mc.markBadConn(err)
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return err
}
@@ -348,7 +387,7 @@ func (mc *mysqlConn) exec(query string) error {
}
}
- return mc.discardResults()
+ return handleOk.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -356,8 +395,9 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ handleOk := mc.clearResult()
+
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -373,43 +413,47 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
- if err == nil {
- // Read Result
- var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
+ if err != nil {
+ return nil, mc.markBadConn(err)
+ }
- if resLen == 0 {
- rows.rs.done = true
+ // Read Result
+ var resLen int
+ resLen, err = handleOk.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
+ rows := new(textRows)
+ rows.mc = mc
- // Columns
- rows.rs.columns, err = mc.readColumns(resLen)
- return rows, err
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
}
}
- return nil, mc.markBadConn(err)
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
}
// Gets the value of the given MySQL System Variable
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
+ handleOk := mc.clearResult()
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -430,7 +474,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
return nil, err
}
-// finish is called when the query has canceled.
+// cancel is called when the query has canceled.
func (mc *mysqlConn) cancel(err error) {
mc.canceled.Set(err)
mc.cleanup()
@@ -451,7 +495,6 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -460,11 +503,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
}
defer mc.finish()
+ handleOk := mc.clearResult()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
- return mc.readResultOK()
+ return handleOk.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
@@ -636,15 +680,42 @@ func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
// ResetSession implements driver.SessionResetter.
// (From Go 1.10)
func (mc *mysqlConn) ResetSession(ctx context.Context) error {
- if mc.closed.Load() {
+ if mc.closed.Load() || mc.buf.busy() {
return driver.ErrBadConn
}
- mc.reset = true
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.cfg.CheckConnLiveness {
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ mc.log("closing bad idle connection: ", err)
+ return driver.ErrBadConn
+ }
+ }
+
return nil
}
// IsValid implements driver.Validator interface
// (From Go 1.15)
func (mc *mysqlConn) IsValid() bool {
- return !mc.closed.Load()
+ return !mc.closed.Load() && !mc.buf.busy()
}
+
+var _ driver.SessionResetter = &mysqlConn{}
+var _ driver.Validator = &mysqlConn{}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
index d567b4e4..bc1d46af 100644
--- a/vendor/github.com/go-sql-driver/mysql/connector.go
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -11,11 +11,55 @@ package mysql
import (
"context"
"database/sql/driver"
+ "fmt"
"net"
+ "os"
+ "strconv"
+ "strings"
)
type connector struct {
- cfg *Config // immutable private copy.
+ cfg *Config // immutable private copy.
+ encodedAttributes string // Encoded connection attributes.
+}
+
+func encodeConnectionAttributes(cfg *Config) string {
+ connAttrsBuf := make([]byte, 0)
+
+ // default connection attributes
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
+ serverHost, _, _ := net.SplitHostPort(cfg.Addr)
+ if serverHost != "" {
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
+ }
+
+ // user-defined connection attributes
+ for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
+ k, v, found := strings.Cut(connAttr, ":")
+ if !found {
+ continue
+ }
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
+ }
+
+ return string(connAttrsBuf)
+}
+
+func newConnector(cfg *Config) *connector {
+ encodedAttributes := encodeConnectionAttributes(cfg)
+ return &connector{
+ cfg: cfg,
+ encodedAttributes: encodedAttributes,
+ }
}
// Connect implements driver.Connector interface.
@@ -23,43 +67,56 @@ type connector struct {
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
+ // Invoke beforeConnect if present, with a copy of the configuration
+ cfg := c.cfg
+ if c.cfg.beforeConnect != nil {
+ cfg = c.cfg.Clone()
+ err = c.cfg.beforeConnect(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
- cfg: c.cfg,
+ cfg: cfg,
+ connector: c,
}
mc.parseTime = mc.cfg.ParseTime
// Connect to Server
- dialsLock.RLock()
- dial, ok := dials[mc.cfg.Net]
- dialsLock.RUnlock()
- if ok {
- dctx := ctx
- if mc.cfg.Timeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
- defer cancel()
- }
- mc.netConn, err = dial(dctx, mc.cfg.Addr)
- } else {
- nd := net.Dialer{Timeout: mc.cfg.Timeout}
- mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
}
+ if c.cfg.DialFunc != nil {
+ mc.netConn, err = c.cfg.DialFunc(dctx, mc.cfg.Net, mc.cfg.Addr)
+ } else {
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{}
+ mc.netConn, err = nd.DialContext(dctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+ }
if err != nil {
return nil, err
}
+ mc.rawConn = mc.netConn
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
+ c.cfg.Logger.Print(err)
}
}
@@ -71,11 +128,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
}
defer mc.finish()
- mc.buf = newBuffer(mc.netConn)
-
- // Set I/O timeouts
- mc.buf.timeout = mc.cfg.ReadTimeout
- mc.writeTimeout = mc.cfg.WriteTimeout
+ mc.buf = newBuffer()
// Reading Handshake Initialization Packet
authData, plugin, err := mc.readHandshakePacket()
@@ -92,7 +145,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
@@ -114,6 +167,10 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
return nil, err
}
+ if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
+ mc.compress = true
+ mc.compIO = newCompIO(mc)
+ }
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
@@ -123,12 +180,36 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
mc.Close()
return nil, err
}
- mc.maxAllowedPacket = stringToInt(maxap) - 1
+ n, err := strconv.Atoi(string(maxap))
+ if err != nil {
+ mc.Close()
+ return nil, fmt.Errorf("invalid max_allowed_packet value (%q): %w", maxap, err)
+ }
+ mc.maxAllowedPacket = n - 1
}
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
+ // Charset: character_set_connection, character_set_client, character_set_results
+ if len(mc.cfg.charsets) > 0 {
+ for _, cs := range mc.cfg.charsets {
+ // ignore errors here - a charset may not exist
+ if mc.cfg.Collation != "" {
+ err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
+ } else {
+ err = mc.exec("SET NAMES " + cs)
+ }
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ }
+
// Handle DSN Params
err = mc.handleParams()
if err != nil {
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index b1e6b85e..4aadcd64 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -8,12 +8,27 @@
package mysql
+import "runtime"
+
const (
+ debug = false // for debugging. Set true only in development.
+
defaultAuthPlugin = "mysql_native_password"
- defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
+
+ // Connection attributes
+ // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
+ connAttrClientName = "_client_name"
+ connAttrClientNameValue = "Go-MySQL-Driver"
+ connAttrOS = "_os"
+ connAttrOSValue = runtime.GOOS
+ connAttrPlatform = "_platform"
+ connAttrPlatformValue = runtime.GOARCH
+ connAttrPid = "_pid"
+ connAttrServerHost = "_server_host"
)
// MySQL constants documentation:
@@ -112,7 +127,10 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeVector fieldType = iota + 0xf2
+ fieldTypeInvalid
+ fieldTypeBool
+ fieldTypeJSON
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index ad7aec21..105316b8 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) {
dials[net] = dial
}
+// DeregisterDialContext removes the custom dial function registered with the given net.
+func DeregisterDialContext(net string) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials != nil {
+ delete(dials, net)
+ }
+}
+
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, err
}
- c := &connector{
- cfg: cfg,
- }
+ c := newConnector(cfg)
return c.Connect(context.Background())
}
+// This variable can be replaced with -ldflags like below:
+// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
+var driverName = "mysql"
+
func init() {
- sql.Register("mysql", &MySQLDriver{})
+ if driverName != "" {
+ sql.Register(driverName, &MySQLDriver{})
+ }
}
// NewConnector returns new driver.Connector.
@@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) {
if err := cfg.normalize(); err != nil {
return nil, err
}
- return &connector{cfg: cfg}, nil
+ return newConnector(cfg), nil
}
// OpenConnector implements driver.DriverContext.
@@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
if err != nil {
return nil, err
}
- return &connector{
- cfg: cfg,
- }, nil
+ return newConnector(cfg), nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 4b71aaab..ecf62567 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -10,6 +10,7 @@ package mysql
import (
"bytes"
+ "context"
"crypto/rsa"
"crypto/tls"
"errors"
@@ -34,22 +35,29 @@ var (
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
+ // non boolean fields
+
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
+ Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
+ Collation string // Connection collation. When set, this will be set in SET NAMES COLLATE query
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+ Logger Logger // Logger
+ // DialFunc specifies the dial function for creating connections
+ DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // boolean fields
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -63,17 +71,83 @@ type Config struct {
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
+
+ // unexported fields. new options should be come here.
+ // boolean first. alphabetical order.
+
+ compress bool // Enable zlib compression
+
+ beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
+ pubKey *rsa.PublicKey // Server public key
+ timeTruncate time.Duration // Truncate time.Time values to the specified duration
+ charsets []string // Connection charset. When set, this will be set in SET NAMES query
}
+// Functional Options Pattern
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(*Config) error
+
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
+ cfg := &Config{
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
+ Logger: defaultLogger,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
+ return cfg
+}
+
+// Apply applies the given options to the Config object.
+func (c *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TimeTruncate sets the time duration to truncate time.Time values in
+// query parameters.
+func TimeTruncate(d time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.timeTruncate = d
+ return nil
+ }
+}
+
+// BeforeConnect sets the function to be invoked before a connection is established.
+func BeforeConnect(fn func(context.Context, *Config) error) Option {
+ return func(cfg *Config) error {
+ cfg.beforeConnect = fn
+ return nil
+ }
+}
+
+// EnableCompression sets the compression mode.
+func EnableCompression(yes bool) Option {
+ return func(cfg *Config) error {
+ cfg.compress = yes
+ return nil
+ }
+}
+
+// Charset sets the connection charset and collation.
+//
+// charset is the connection charset.
+// collation is the connection collation. It can be null or empty string.
+//
+// When collation is not specified, a `SET NAMES <charset>` command is sent when the connection is established.
+// When collation is specified, a `SET NAMES <charset> COLLATE <collation>` command is sent when the connection is established.
+func Charset(charset, collation string) Option {
+ return func(cfg *Config) error {
+ cfg.charsets = []string{charset}
+ cfg.Collation = collation
+ return nil
+ }
}
func (cfg *Config) Clone() *Config {
@@ -97,7 +171,7 @@ func (cfg *Config) Clone() *Config {
}
func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
@@ -153,6 +227,10 @@ func (cfg *Config) normalize() error {
}
}
+ if cfg.Logger == nil {
+ cfg.Logger = defaultLogger
+ }
+
return nil
}
@@ -171,6 +249,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
// FormatDSN formats the given Config into a DSN string which can be passed to
// the driver.
+//
+// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
func (cfg *Config) FormatDSN() string {
var buf bytes.Buffer
@@ -196,7 +276,7 @@ func (cfg *Config) FormatDSN() string {
// /dbname
buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
+ buf.WriteString(url.PathEscape(cfg.DBName))
// [?param1=value1&...¶mN=valueN]
hasParam := false
@@ -230,7 +310,11 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if charsets := cfg.charsets; len(charsets) > 0 {
+ writeDSNParam(&buf, &hasParam, "charset", strings.Join(charsets, ","))
+ }
+
+ if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@@ -238,6 +322,14 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
}
+ if cfg.ConnectionAttributes != "" {
+ writeDSNParam(&buf, &hasParam, "connectionAttributes", url.QueryEscape(cfg.ConnectionAttributes))
+ }
+
+ if cfg.compress {
+ writeDSNParam(&buf, &hasParam, "compress", "true")
+ }
+
if cfg.InterpolateParams {
writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
}
@@ -254,6 +346,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
+ if cfg.timeTruncate > 0 {
+ writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
+ }
+
if cfg.ReadTimeout > 0 {
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
@@ -358,7 +454,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
break
}
}
- cfg.DBName = dsn[i+1 : j]
+
+ dbname := dsn[i+1 : j]
+ if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
+ return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
+ }
break
}
@@ -378,13 +478,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *Config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
+ key, value, found := strings.Cut(v, "=")
+ if !found {
continue
}
// cfg params
- switch value := param[1]; param[0] {
+ switch key {
// Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
@@ -441,6 +541,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // charset
+ case "charset":
+ cfg.charsets = strings.Split(value, ",")
+
// Collation
case "collation":
cfg.Collation = value
@@ -454,7 +558,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// Compression
case "compress":
- return errors.New("compression not implemented yet")
+ var isBool bool
+ cfg.compress, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
// Enable client side placeholder substitution
case "interpolateParams":
@@ -490,6 +598,13 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // time.Time truncation
+ case "timeTruncate":
+ cfg.timeTruncate, err = time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
+ }
+
// I/O read Timeout
case "readTimeout":
cfg.ReadTimeout, err = time.ParseDuration(value)
@@ -554,13 +669,22 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
+
+ // Connection attributes
+ case "connectionAttributes":
+ connectionAttributes, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid connectionAttributes value: %v", err)
+ }
+ cfg.ConnectionAttributes = connectionAttributes
+
default:
// lazy init
if cfg.Params == nil {
cfg.Params = make(map[string]string)
}
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
return
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 7c037e7d..584617b1 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -21,36 +21,42 @@ var (
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
ErrPktSync = errors.New("commands out of sync. You can't run this command now")
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
- ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`")
ErrBusyBuffer = errors.New("busy buffer")
// errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
// If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
- // to trigger a resend.
+ // to trigger a resend. Use mc.markBadConn(err) to do this.
// See https://github.com/go-sql-driver/mysql/pull/302
errBadConnNoWrite = errors.New("bad connection")
)
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime))
// Logger is used to log critical error messages.
type Logger interface {
- Print(v ...interface{})
+ Print(v ...any)
}
-// SetLogger is used to set the logger for critical errors.
+// NopLogger is a nop implementation of the Logger interface.
+type NopLogger struct{}
+
+// Print implements Logger interface.
+func (nl *NopLogger) Print(_ ...any) {}
+
+// SetLogger is used to set the default logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
- errLog = logger
+ defaultLogger = logger
return nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
index e0654a83..be5cd809 100644
--- a/vendor/github.com/go-sql-driver/mysql/fields.go
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TEXT"
}
return "BLOB"
@@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED MEDIUMINT"
+ }
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
@@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "INT"
case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "LONGTEXT"
}
return "LONGBLOB"
@@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "BIGINT"
case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
@@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "SMALLINT"
case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.flags&flagEnum != 0 {
+ return "ENUM"
+ } else if mf.flags&flagSet != 0 {
+ return "SET"
+ }
+ if mf.charSet == binaryCollationID {
return "BINARY"
}
return "CHAR"
@@ -88,43 +96,47 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "TINYINT"
case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeYear:
return "YEAR"
+ case fieldTypeVector:
+ return "VECTOR"
default:
return ""
}
}
var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeString = reflect.TypeOf("")
+ scanTypeNullString = reflect.TypeOf(sql.NullString{})
+ scanTypeBytes = reflect.TypeOf([]byte{})
+ scanTypeUnknown = reflect.TypeOf(new(any))
)
type mysqlField struct {
@@ -187,12 +199,18 @@ func (mf *mysqlField) scanType() reflect.Type {
}
return scanTypeNullFloat
+ case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
+ fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeVector:
+ if mf.charSet == binaryCollationID {
+ return scanTypeBytes
+ }
+ fallthrough
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
+ fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeString
+ }
+ return scanTypeNullString
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
deleted file mode 100644
index 3a4ec25a..00000000
--- a/vendor/github.com/go-sql-driver/mysql/fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build gofuzz
-// +build gofuzz
-
-package mysql
-
-import (
- "database/sql"
-)
-
-func Fuzz(data []byte) int {
- db, err := sql.Open("mysql", string(data))
- if err != nil {
- return 0
- }
- db.Close()
- return 1
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 3279dcff..453ae091 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -17,7 +17,7 @@ import (
)
var (
- fileRegister map[string]bool
+ fileRegister map[string]struct{}
fileRegisterLock sync.RWMutex
readerRegister map[string]func() io.Reader
readerRegisterLock sync.RWMutex
@@ -37,10 +37,10 @@ func RegisterLocalFile(filePath string) {
fileRegisterLock.Lock()
// lazy map init
if fileRegister == nil {
- fileRegister = make(map[string]bool)
+ fileRegister = make(map[string]struct{})
}
- fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegister[strings.Trim(filePath, `"`)] = struct{}{}
fileRegisterLock.Unlock()
}
@@ -93,9 +93,8 @@ func deferredClose(err *error, closer io.Closer) {
const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
- var data []byte
packetSize := defaultPacketSize
if mc.maxWriteSize < packetSize {
packetSize = mc.maxWriteSize
@@ -116,17 +115,17 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
defer deferredClose(&err, cl)
}
} else {
- err = fmt.Errorf("Reader '%s' is ", name)
+ err = fmt.Errorf("reader '%s' is ", name)
}
} else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
+ err = fmt.Errorf("reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
fileRegisterLock.RLock()
- fr := fileRegister[name]
+ _, exists := fileRegister[name]
fileRegisterLock.RUnlock()
- if mc.cfg.AllowAllFiles || fr {
+ if mc.cfg.AllowAllFiles || exists {
var file *os.File
var fi os.FileInfo
@@ -147,14 +146,16 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
+ var data []byte
+
// if packetSize == 0, the Reader contains no data
if err == nil && packetSize > 0 {
- data := make([]byte, 4+packetSize)
+ data = make([]byte, 4+packetSize)
var n int
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
@@ -168,15 +169,16 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
if data == nil {
data = make([]byte, 4)
}
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
+ mc.conn().syncSequence()
// read OK packet
if err == nil {
return mc.readResultOK()
}
- mc.readPacket()
+ mc.conn().readPacket()
return err
}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
index 36c8a42c..316a48aa 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -38,7 +38,7 @@ type NullTime sql.NullTime
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
+func (nt *NullTime) Scan(value any) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
@@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
}
nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
+ return fmt.Errorf("can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index ee05c95a..831fca6c 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -14,75 +14,108 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
- "errors"
"fmt"
"io"
"math"
+ "os"
+ "strconv"
"time"
)
-// Packets documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+// MySQL client/server protocol documentations.
+// https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html
+// https://mariadb.com/kb/en/clientserver-protocol/
+
+// read n bytes from mc.buf
+func (mc *mysqlConn) readNext(n int) ([]byte, error) {
+ if mc.buf.len() < n {
+ err := mc.buf.fill(n, mc.readWithTimeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return mc.buf.readNext(n), nil
+}
// Read packet to buffer 'data'
func (mc *mysqlConn) readPacket() ([]byte, error) {
var prevData []byte
+ invalidSequence := false
+
+ readNext := mc.readNext
+ if mc.compress {
+ readNext = mc.compIO.readNext
+ }
+
for {
// read packet header
- data, err := mc.buf.readNext(4)
+ data, err := readNext(4)
if err != nil {
+ mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
- mc.Close()
+ mc.log(err)
return nil, ErrInvalidConn
}
// packet length [24 bit]
- pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+ pktLen := getUint24(data[:3])
+ seq := data[3]
// check packet sync [8 bit]
- if data[3] != mc.sequence {
- if data[3] > mc.sequence {
- return nil, ErrPktSyncMul
+ if seq != mc.sequence {
+ mc.log(fmt.Sprintf("[warn] unexpected sequence nr: expected %v, got %v", mc.sequence, seq))
+ // MySQL and MariaDB doesn't check packet nr in compressed packet.
+ if !mc.compress {
+ // For large packets, we stop reading as soon as sync error.
+ if len(prevData) > 0 {
+ mc.close()
+ return nil, ErrPktSyncMul
+ }
+ invalidSequence = true
}
- return nil, ErrPktSync
}
- mc.sequence++
+ mc.sequence = seq + 1
// packets with length 0 terminate a previous packet which is a
// multiple of (2^24)-1 bytes long
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
- errLog.Print(ErrMalformPkt)
- mc.Close()
+ mc.log(ErrMalformPkt)
+ mc.close()
return nil, ErrInvalidConn
}
-
return prevData, nil
}
// read packet body [pktLen bytes]
- data, err = mc.buf.readNext(pktLen)
+ data, err = readNext(pktLen)
if err != nil {
+ mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
- mc.Close()
+ mc.log(err)
return nil, ErrInvalidConn
}
// return data if this was the last packet
if pktLen < maxPacketSize {
// zero allocations for non-split packets
- if prevData == nil {
- return data, nil
+ if prevData != nil {
+ data = append(prevData, data...)
}
-
- return append(prevData, data...), nil
+ if invalidSequence {
+ mc.close()
+ // return sync error only for regular packet.
+ // error packets may have wrong sequence number.
+ if data[0] != iERR {
+ return nil, ErrPktSync
+ }
+ }
+ return data, nil
}
prevData = append(prevData, data...)
@@ -92,88 +125,52 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// Write packet buffer 'data'
func (mc *mysqlConn) writePacket(data []byte) error {
pktLen := len(data) - 4
-
if pktLen > mc.maxAllowedPacket {
return ErrPktTooLarge
}
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.CheckConnLiveness {
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
+ writeFunc := mc.writeWithTimeout
+ if mc.compress {
+ writeFunc = mc.compIO.writePackets
}
for {
- var size int
- if pktLen >= maxPacketSize {
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
- size = maxPacketSize
- } else {
- data[0] = byte(pktLen)
- data[1] = byte(pktLen >> 8)
- data[2] = byte(pktLen >> 16)
- size = pktLen
- }
+ size := min(maxPacketSize, pktLen)
+ putUint24(data[:3], size)
data[3] = mc.sequence
// Write packet
- if mc.writeTimeout > 0 {
- if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
- return err
- }
+ if debug {
+ fmt.Fprintf(os.Stderr, "writePacket: size=%v seq=%v\n", size, mc.sequence)
}
- n, err := mc.netConn.Write(data[:4+size])
- if err == nil && n == 4+size {
- mc.sequence++
- if size != maxPacketSize {
- return nil
- }
- pktLen -= size
- data = data[size:]
- continue
- }
-
- // Handle error
- if err == nil { // n != len(data)
+ n, err := writeFunc(data[:4+size])
+ if err != nil {
mc.cleanup()
- errLog.Print(ErrMalformPkt)
- } else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
}
if n == 0 && pktLen == len(data)-4 {
// only for the first loop iteration when nothing was written yet
+ mc.log(err)
return errBadConnNoWrite
+ } else {
+ return err
}
- mc.cleanup()
- errLog.Print(err)
}
- return ErrInvalidConn
+ if n != 4+size {
+ // io.Writer(b) must return a non-nil error if it cannot write len(b) bytes.
+ // The io.ErrShortWrite error is used to indicate that this rule has not been followed.
+ mc.cleanup()
+ return io.ErrShortWrite
+ }
+
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
}
}
@@ -186,11 +183,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
data, err = mc.readPacket()
if err != nil {
- // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
- // in connection initialization we don't risk retrying non-idempotent actions.
- if err == ErrInvalidConn {
- return nil, "", driver.ErrBadConn
- }
return
}
@@ -234,12 +226,15 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
if len(data) > pos {
// character set [1 byte]
// status flags [2 bytes]
+ pos += 3
// capability flags (upper 2 bytes) [2 bytes]
+ mc.flags |= clientFlag(binary.LittleEndian.Uint16(data[pos:pos+2])) << 16
+ pos += 2
// length of auth-plugin-data [1 byte]
// reserved (all [00]) [10 bytes]
- pos += 1 + 2 + 2 + 1 + 10
+ pos += 11
- // second part of the password cipher [mininum 13 bytes],
+ // second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
//
// The web documentation is ambiguous about the length. However,
@@ -285,12 +280,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
+ mc.flags&clientConnectAttrs |
mc.flags&clientLongFlag
+ sendConnectAttrs := mc.flags&clientConnectAttrs != 0
+
if mc.cfg.ClientFoundRows {
clientFlags |= clientFoundRows
}
-
+ if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
+ clientFlags |= clientCompress
+ }
// To enable TLS / SSL
if mc.cfg.TLS != nil {
clientFlags |= clientSSL
@@ -318,34 +318,38 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pktLen += n + 1
}
+ // encode length of the connection attributes
+ var connAttrsLEI []byte
+ if sendConnectAttrs {
+ var connAttrsLEIBuf [9]byte
+ connAttrsLen := len(mc.connector.encodedAttributes)
+ connAttrsLEI = appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
+ pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
+ }
+
// Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ mc.cleanup()
+ return err
}
// ClientFlags [32 bit]
- data[4] = byte(clientFlags)
- data[5] = byte(clientFlags >> 8)
- data[6] = byte(clientFlags >> 16)
- data[7] = byte(clientFlags >> 24)
+ binary.LittleEndian.PutUint32(data[4:], uint32(clientFlags))
// MaxPacketSize [32 bit] (none)
- data[8] = 0x00
- data[9] = 0x00
- data[10] = 0x00
- data[11] = 0x00
+ binary.LittleEndian.PutUint32(data[8:], 0)
- // Charset [1 byte]
- var found bool
- data[12], found = collations[mc.cfg.Collation]
- if !found {
- // Note possibility for false negatives:
- // could be triggered although the collation is valid if the
- // collations map does not contain entries the server supports.
- return errors.New("unknown collation")
+ // Collation ID [1 byte]
+ data[12] = defaultCollationID
+ if cname := mc.cfg.Collation; cname != "" {
+ colID, ok := collations[cname]
+ if ok {
+ data[12] = colID
+ } else if len(mc.cfg.charsets) > 0 {
+ // When cfg.charset is set, the collation is set by `SET NAMES COLLATE `.
+ return fmt.Errorf("unknown collation: %q", cname)
+ }
}
// Filler [23 bytes] (all 0x00)
@@ -365,11 +369,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// Switch to TLS
tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
if err := tlsConn.Handshake(); err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
return err
}
- mc.rawConn = mc.netConn
mc.netConn = tlsConn
- mc.buf.nc = tlsConn
}
// User [null terminated string]
@@ -394,6 +399,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[pos] = 0x00
pos++
+ // Connection Attributes
+ if sendConnectAttrs {
+ pos += copy(data[pos:], connAttrsLEI)
+ pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
+ }
+
// Send Auth packet
return mc.writePacket(data[:pos])
}
@@ -401,11 +412,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
pktLen := 4 + len(authData)
- data, err := mc.buf.takeSmallBuffer(pktLen)
+ data, err := mc.buf.takeBuffer(pktLen)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ mc.cleanup()
+ return err
}
// Add the auth data [EOF]
@@ -419,32 +429,30 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
func (mc *mysqlConn) writeCommandPacket(command byte) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
data[4] = command
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
pktLen := 1 + len(arg)
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
@@ -454,31 +462,30 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
copy(data[5:], arg)
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
// Reset Packet Sequence
- mc.sequence = 0
+ mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// Add command byte
data[4] = command
// Add arg [32 bit]
- data[5] = byte(arg)
- data[6] = byte(arg >> 8)
- data[7] = byte(arg >> 16)
- data[8] = byte(arg >> 24)
+ binary.LittleEndian.PutUint32(data[5:], arg)
// Send CMD packet
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
/******************************************************************************
@@ -495,7 +502,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
switch data[0] {
case iOK:
- return nil, "", mc.handleOkPacket(data)
+ // resultUnchanged, since auth happens before any queries or
+ // commands have been executed.
+ return nil, "", mc.resultUnchanged().handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
@@ -511,6 +520,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
plugin := string(data[1:pluginEndIndex])
authData := data[pluginEndIndex+1:]
+ if len(authData) > 0 && authData[len(authData)-1] == 0 {
+ authData = authData[:len(authData)-1]
+ }
return authData, plugin, nil
default: // Error otherwise
@@ -518,9 +530,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
}
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *okHandler) readResultOK() error {
+ data, err := mc.conn().readPacket()
if err != nil {
return err
}
@@ -528,35 +540,37 @@ func (mc *mysqlConn) readResultOK() error {
if data[0] == iOK {
return mc.handleOkPacket(data)
}
- return mc.handleErrorPacket(data)
+ return mc.conn().handleErrorPacket(data)
}
// Result Set Header Packet
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
- if err == nil {
- switch data[0] {
+// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response.html
+func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
+ // handleOkPacket replaces both values; other cases leave the values unchanged.
+ mc.result.affectedRows = append(mc.result.affectedRows, 0)
+ mc.result.insertIds = append(mc.result.insertIds, 0)
- case iOK:
- return 0, mc.handleOkPacket(data)
-
- case iERR:
- return 0, mc.handleErrorPacket(data)
-
- case iLocalInFile:
- return 0, mc.handleInFileRequest(string(data[1:]))
- }
-
- // column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
+ data, err := mc.conn().readPacket()
+ if err != nil {
+ return 0, err
}
- return 0, err
+
+ switch data[0] {
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.conn().handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_text_resultset.html
+ num, _, _ := readLengthEncodedInteger(data)
+ // ignore remaining data in the packet. see #1478.
+ return int(num), nil
}
// Error Packet
@@ -573,7 +587,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
- if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // 1836: ER_READ_ONLY_MODE
+ if (errno == 1792 || errno == 1290 || errno == 1836) && mc.cfg.RejectReadOnly {
// Oops; we are connected to a read-only connection, and won't be able
// to issue any write statements. Since RejectReadOnly is configured,
// we throw away this connection hoping this one would have write
@@ -607,18 +622,61 @@ func readStatus(b []byte) statusFlag {
return statusFlag(b[0]) | statusFlag(b[1])<<8
}
+// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
+// need to be cleared first (e.g. during authentication, or while additional
+// resultsets are being fetched.)
+func (mc *mysqlConn) resultUnchanged() *okHandler {
+ return (*okHandler)(mc)
+}
+
+// okHandler represents the state of the connection when mysqlConn.result has
+// been prepared for processing of OK packets.
+//
+// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
+// callpaths must either:
+//
+// 1. first clear it using clearResult(), or
+// 2. confirm that they don't need to (by calling resultUnchanged()).
+//
+// Both return an instance of type *okHandler.
+type okHandler mysqlConn
+
+// Exposes the underlying type's methods.
+func (mc *okHandler) conn() *mysqlConn {
+ return (*mysqlConn)(mc)
+}
+
+// clearResult clears the connection's stored affectedRows and insertIds
+// fields.
+//
+// It returns a handler that can process OK responses.
+func (mc *mysqlConn) clearResult() *okHandler {
+ mc.result = mysqlResult{}
+ return (*okHandler)(mc)
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
+func (mc *okHandler) handleOkPacket(data []byte) error {
var n, m int
+ var affectedRows, insertId uint64
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+ affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+ insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // Update for the current statement result (only used by
+ // readResultSetHeaderPacket).
+ if len(mc.result.affectedRows) > 0 {
+ mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
+ }
+ if len(mc.result.insertIds) > 0 {
+ mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
+ }
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
@@ -769,7 +827,8 @@ func (rows *textRows) readRow(dest []driver.Value) error {
for i := range dest {
// Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ var buf []byte
+ buf, isNull, n, err = readLengthEncodedString(data[pos:])
pos += n
if err != nil {
@@ -781,19 +840,40 @@ func (rows *textRows) readRow(dest []driver.Value) error {
continue
}
- if !mc.parseTime {
- continue
- }
-
- // Parse time field
switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp,
fieldTypeDateTime,
fieldTypeDate,
fieldTypeNewDate:
- if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
- return err
+ if mc.parseTime {
+ dest[i], err = parseDateTime(buf, mc.cfg.Loc)
+ } else {
+ dest[i] = buf
}
+
+ case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i], err = strconv.ParseUint(string(buf), 10, 64)
+ } else {
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+ }
+
+ case fieldTypeFloat:
+ var d float64
+ d, err = strconv.ParseFloat(string(buf), 32)
+ dest[i] = float32(d)
+
+ case fieldTypeDouble:
+ dest[i], err = strconv.ParseFloat(string(buf), 64)
+
+ default:
+ dest[i] = buf
+ }
+ if err != nil {
+ return err
}
}
@@ -875,32 +955,26 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
pktLen = dataOffset + argLen
}
- stmt.mc.sequence = 0
// Add command byte [1 byte]
data[4] = comStmtSendLongData
// Add stmtID [32 bit]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
+ binary.LittleEndian.PutUint32(data[5:], stmt.id)
// Add paramID [16 bit]
- data[9] = byte(paramID)
- data[10] = byte(paramID >> 8)
+ binary.LittleEndian.PutUint16(data[9:], uint16(paramID))
// Send CMD packet
err := stmt.mc.writePacket(data[:4+pktLen])
+ // Every COM_LONG_DATA packet reset Packet Sequence
+ stmt.mc.resetSequence()
if err == nil {
data = data[pktLen-dataOffset:]
continue
}
return err
-
}
- // Reset Packet Sequence
- stmt.mc.sequence = 0
return nil
}
@@ -925,7 +999,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
// Reset packet-sequence
- mc.sequence = 0
+ mc.resetSequence()
var data []byte
var err error
@@ -937,28 +1011,20 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In this case the len(data) == cap(data) which is used to optimise the flow below.
}
if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
+ return err
}
// command [1 byte]
data[4] = comStmtExecute
// statement_id [4 bytes]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
+ binary.LittleEndian.PutUint32(data[5:], stmt.id)
// flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
data[9] = 0x00
// iteration_count (uint32(1)) [4 bytes]
- data[10] = 0x01
- data[11] = 0x00
- data[12] = 0x00
- data[13] = 0x00
+ binary.LittleEndian.PutUint32(data[10:], 1)
if len(args) > 0 {
pos := minPktLen
@@ -1012,50 +1078,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case int64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case uint64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x80 // type is unsigned
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case float64:
paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- math.Float64bits(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(math.Float64bits(v))...,
- )
- }
+ paramValues = binary.LittleEndian.AppendUint64(paramValues, math.Float64bits(v))
case bool:
paramTypes[i+i] = byte(fieldTypeTiny)
@@ -1116,7 +1149,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return err
}
@@ -1136,20 +1169,21 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In that case we must build the data packet with the new values buffer
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
- if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
- return errBadConnNoWrite
- }
+ mc.buf.store(data) // allow this buffer to be reused
}
pos += len(paramValues)
data = data[:pos]
}
- return mc.writePacket(data)
+ err = mc.writePacket(data)
+ mc.syncSequence()
+ return err
}
-func (mc *mysqlConn) discardResults() error {
+// For each remaining resultset in the stream, discards its rows and updates
+// mc.affectedRows and mc.insertIds.
+func (mc *okHandler) discardResults() error {
for mc.status&statusMoreResultsExists != 0 {
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
@@ -1157,11 +1191,11 @@ func (mc *mysqlConn) discardResults() error {
}
if resLen > 0 {
// columns
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
// rows
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
}
@@ -1268,7 +1302,8 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeVector:
var isNull bool
var n int
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
index c6438d03..d5163146 100644
--- a/vendor/github.com/go-sql-driver/mysql/result.go
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -8,15 +8,43 @@
package mysql
+import "database/sql/driver"
+
+// Result exposes data not available through *connection.Result.
+//
+// This is accessible by executing statements using sql.Conn.Raw() and
+// downcasting the returned result:
+//
+// res, err := rawConn.Exec(...)
+// res.(mysql.Result).AllRowsAffected()
+type Result interface {
+ driver.Result
+ // AllRowsAffected returns a slice containing the affected rows for each
+ // executed statement.
+ AllRowsAffected() []int64
+ // AllLastInsertIds returns a slice containing the last inserted ID for each
+ // executed statement.
+ AllLastInsertIds() []int64
+}
+
type mysqlResult struct {
- affectedRows int64
- insertId int64
+ // One entry in both slices is created for every executed statement result.
+ affectedRows []int64
+ insertIds []int64
}
func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
+ return res.insertIds[len(res.insertIds)-1], nil
}
func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
+ return res.affectedRows[len(res.affectedRows)-1], nil
+}
+
+func (res *mysqlResult) AllLastInsertIds() []int64 {
+ return append([]int64{}, res.insertIds...) // defensive copy
+}
+
+func (res *mysqlResult) AllRowsAffected() []int64 {
+ return append([]int64{}, res.affectedRows...) // defensive copy
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index 888bdb5f..df98417b 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -111,19 +111,13 @@ func (rows *mysqlRows) Close() (err error) {
return err
}
- // flip the buffer for this connection if we need to drain it.
- // note that for a successful query (i.e. one where rows.next()
- // has been called until it returns false), `rows.mc` will be nil
- // by the time the user calls `(*Rows).Close`, so we won't reach this
- // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
- mc.buf.flip()
-
// Remove unread packets from stream
if !rows.rs.done {
err = mc.readUntilEOF()
}
if err == nil {
- if err = mc.discardResults(); err != nil {
+ handleOk := mc.clearResult()
+ if err = handleOk.discardResults(); err != nil {
return err
}
}
@@ -160,7 +154,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
return 0, io.EOF
}
rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
+ // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
+ // nextResultSet.
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
+ if err != nil {
+ // Clean up about multi-results flag
+ rows.rs.done = true
+ rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
+ }
+ return resLen, err
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 10ece8bd..35df8545 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -24,11 +24,12 @@ type mysqlStmt struct {
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.closed.Load() {
- // driver.Stmt.Close can be called more than once, thus this function
- // has to be idempotent.
- // See also Issue #450 and golang/go#16019.
- //errLog.Print(ErrInvalidConn)
- return driver.ErrBadConn
+ // driver.Stmt.Close could be called more than once, thus this function
+ // had to be idempotent. See also Issue #450 and golang/go#16019.
+ // This bug has been fixed in Go 1.8.
+ // https://github.com/golang/go/commit/90b8a0ca2d0b565c7c7199ffcf77b15ea6b6db3a
+ // But we keep this function idempotent because it is safer.
+ return nil
}
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
@@ -51,7 +52,6 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
+ handleOk := stmt.mc.clearResult()
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
}
- if err := mc.discardResults(); err != nil {
+ if err := handleOk.discardResults(); err != nil {
return nil, err
}
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+ copied := mc.result
+ return &copied, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -99,7 +95,6 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -111,7 +106,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
mc := stmt.mc
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ handleOk := stmt.mc.clearResult()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -144,7 +140,7 @@ type converter struct{}
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+func (c converter) ConvertValue(v any) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
index 4a4b6100..8c502f49 100644
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,18 +13,32 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
+ if tx.mc == nil {
return ErrInvalidConn
}
+ if tx.mc.closed.Load() {
+ err = tx.mc.error()
+ if err == nil {
+ err = ErrInvalidConn
+ }
+ return
+ }
err = tx.mc.exec("COMMIT")
tx.mc = nil
return
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
+ if tx.mc == nil {
return ErrInvalidConn
}
+ if tx.mc.closed.Load() {
+ err = tx.mc.error()
+ if err == nil {
+ err = ErrInvalidConn
+ }
+ return
+ }
err = tx.mc.exec("ROLLBACK")
tx.mc = nil
return
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index 15dbd8d1..8716c26c 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -36,7 +36,7 @@ var (
// registering it.
//
// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// pem, err := os.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
-func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
+ if timeTruncate > 0 {
+ t = t.Truncate(timeTruncate)
+ }
+
year, month, day := t.Date()
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
@@ -486,17 +490,16 @@ func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
* Convert from and to bytes *
******************************************************************************/
-func uint64ToBytes(n uint64) []byte {
- return []byte{
- byte(n),
- byte(n >> 8),
- byte(n >> 16),
- byte(n >> 24),
- byte(n >> 32),
- byte(n >> 40),
- byte(n >> 48),
- byte(n >> 56),
- }
+// 24bit integer: used for packet headers.
+
+func putUint24(data []byte, n int) {
+ data[2] = byte(n >> 16)
+ data[1] = byte(n >> 8)
+ data[0] = byte(n)
+}
+
+func getUint24(data []byte) int {
+ return int(data[2])<<16 | int(data[1])<<8 | int(data[0])
}
func uint64ToString(n uint64) []byte {
@@ -521,16 +524,6 @@ func uint64ToString(n uint64) []byte {
return a[i:]
}
-// treats string value as unsigned integer representation
-func stringToInt(b []byte) int {
- val := 0
- for i := range b {
- val *= 10
- val += int(b[i] - 0x30)
- }
- return val
-}
-
// returns the string read as a bytes slice, whether the value is NULL,
// the number of bytes read and an error, in case the string is longer than
// the input slice
@@ -582,18 +575,15 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
// 252: value of following 2
case 0xfc:
- return uint64(b[1]) | uint64(b[2])<<8, false, 3
+ return uint64(binary.LittleEndian.Uint16(b[1:])), false, 3
// 253: value of following 3
case 0xfd:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+ return uint64(getUint24(b[1:])), false, 4
// 254: value of following 8
case 0xfe:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
- uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
- uint64(b[7])<<48 | uint64(b[8])<<56,
- false, 9
+ return uint64(binary.LittleEndian.Uint64(b[1:])), false, 9
}
// 0-250: value of first byte
@@ -607,13 +597,19 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
return append(b, byte(n))
case n <= 0xffff:
- return append(b, 0xfc, byte(n), byte(n>>8))
+ b = append(b, 0xfc)
+ return binary.LittleEndian.AppendUint16(b, uint16(n))
case n <= 0xffffff:
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
}
- return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
- byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+ b = append(b, 0xfe)
+ return binary.LittleEndian.AppendUint64(b, n)
+}
+
+func appendLengthEncodedString(b []byte, s string) []byte {
+ b = appendLengthEncodedInteger(b, uint64(len(s)))
+ return append(b, s...)
}
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
deleted file mode 100644
index c4efbd2a..00000000
--- a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
+++ /dev/null
@@ -1,22 +0,0 @@
-## Migration Guide (v3.2.1)
-
-Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1]), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
-
-### go.mod replacement
-
-In a first step, the easiest way is to use `go mod edit` to issue a replacement.
-
-```
-go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible
-go mod tidy
-```
-
-This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work.
-
-### Cleanup
-
-If your code still consistently builds, you can replace all occurences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed.
-
-## Older releases (before v3.2.0)
-
-The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
\ No newline at end of file
diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/README.md
deleted file mode 100644
index 9b653e46..00000000
--- a/vendor/github.com/golang-jwt/jwt/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# jwt-go
-
-[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
-[](https://pkg.go.dev/github.com/golang-jwt/jwt)
-
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
-
-**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
-
-Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
-
-**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
-
-**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
-
-### Supported Go versions
-
-Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
-So we will support a major version of Go until there are two newer major releases.
-We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
-which will not be fixed.
-
-## What the heck is a JWT?
-
-JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
-
-In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
-
-The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
-
-The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
-
-## What's in the box?
-
-This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
-
-## Examples
-
-See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
-
-* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
-* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
-* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
-
-## Extensions
-
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
-
-Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
-
-## Compliance
-
-This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
-
-* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
-
-## Project Status & Versioning
-
-This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
-
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
-
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning.
-
-**BREAKING CHANGES:***
-* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
-
-## Usage Tips
-
-### Signing vs Encryption
-
-A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:
-
-* The author of the token was in the possession of the signing secret
-* The data has not been modified since it was signed
-
-It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
-
-### Choosing a Signing Method
-
-There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
-
-Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
-
-Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
-
-### Signing Methods and Key Types
-
-Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
-
-* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
-* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
-* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
-
-### JWT and OAuth
-
-It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
-
-Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
-
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
-* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
-
-### Troubleshooting
-
-This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
-
-## More
-
-Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
-
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
deleted file mode 100644
index 637f2ba6..00000000
--- a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
+++ /dev/null
@@ -1,131 +0,0 @@
-## `jwt-go` Version History
-
-#### 3.2.2
-
-* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
-* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
-* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
-* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
-
-#### 3.2.1
-
-* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
- * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
-* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
-
-#### 3.2.0
-
-* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
-* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
-* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
-* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
-
-#### 3.1.0
-
-* Improvements to `jwt` command line tool
-* Added `SkipClaimsValidation` option to `Parser`
-* Documentation updates
-
-#### 3.0.0
-
-* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
- * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
- * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
- * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
-* Other Additions and Changes
- * Added `Claims` interface type to allow users to decode the claims into a custom type
- * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
- * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
- * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
- * Added several new, more specific, validation errors to error type bitmask
- * Moved examples from README to executable example files
- * Signing method registry is now thread safe
- * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
-
-#### 2.7.0
-
-This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
-
-* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
-* Error text for expired tokens includes how long it's been expired
-* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
-* Documentation updates
-
-#### 2.6.0
-
-* Exposed inner error within ValidationError
-* Fixed validation errors when using UseJSONNumber flag
-* Added several unit tests
-
-#### 2.5.0
-
-* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
-* Updated/fixed some documentation
-* Added more helpful error message when trying to parse tokens that begin with `BEARER `
-
-#### 2.4.0
-
-* Added new type, Parser, to allow for configuration of various parsing parameters
- * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
- * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
-* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
-* Fixed some bugs with ECDSA parsing
-
-#### 2.3.0
-
-* Added support for ECDSA signing methods
-* Added support for RSA PSS signing methods (requires go v1.4)
-
-#### 2.2.0
-
-* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
-
-#### 2.1.0
-
-Backwards compatible API change that was missed in 2.0.0.
-
-* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
-
-#### 2.0.0
-
-There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
-
-The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
-
-It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
-
-* **Compatibility Breaking Changes**
- * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
- * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
- * `KeyFunc` now returns `interface{}` instead of `[]byte`
- * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
- * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
-* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodHS256`
- * Added public package global `SigningMethodHS384`
- * Added public package global `SigningMethodHS512`
-* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodRS256`
- * Added public package global `SigningMethodRS384`
- * Added public package global `SigningMethodRS512`
-* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
-* Refactored the RSA implementation to be easier to read
-* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
-
-#### 1.0.2
-
-* Fixed bug in parsing public keys from certificates
-* Added more tests around the parsing of keys for RS256
-* Code refactoring in RS256 implementation. No functional changes
-
-#### 1.0.1
-
-* Fixed panic if RS256 signing method was passed an invalid key
-
-#### 1.0.0
-
-* First versioned release
-* API stabilized
-* Supports creating, signing, parsing, and validating JWT tokens
-* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go
deleted file mode 100644
index f1dba3cb..00000000
--- a/vendor/github.com/golang-jwt/jwt/claims.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package jwt
-
-import (
- "crypto/subtle"
- "fmt"
- "time"
-)
-
-// For a type to be a Claims object, it must just have a Valid method that determines
-// if the token is invalid for any supported reason
-type Claims interface {
- Valid() error
-}
-
-// Structured version of Claims Section, as referenced at
-// https://tools.ietf.org/html/rfc7519#section-4.1
-// See examples for how to use this with your own claim types
-type StandardClaims struct {
- Audience string `json:"aud,omitempty"`
- ExpiresAt int64 `json:"exp,omitempty"`
- Id string `json:"jti,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- Issuer string `json:"iss,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- Subject string `json:"sub,omitempty"`
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// As well, if any of the above claims are not in the token, it will still
-// be considered a valid claim.
-func (c StandardClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- // The claims below are optional, by default, so if they are set to the
- // default value in Go, let's not fail the verification for them.
- if !c.VerifyExpiresAt(now, false) {
- delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
- vErr.Inner = fmt.Errorf("token is expired by %v", delta)
- vErr.Errors |= ValidationErrorExpired
- }
-
- if !c.VerifyIssuedAt(now, false) {
- vErr.Inner = fmt.Errorf("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if !c.VerifyNotBefore(now, false) {
- vErr.Inner = fmt.Errorf("token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
- return verifyAud([]string{c.Audience}, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- return verifyExp(c.ExpiresAt, cmp, req)
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- return verifyIat(c.IssuedAt, cmp, req)
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
- return verifyIss(c.Issuer, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
- return verifyNbf(c.NotBefore, cmp, req)
-}
-
-// ----- helpers
-
-func verifyAud(aud []string, cmp string, required bool) bool {
- if len(aud) == 0 {
- return !required
- }
- // use a var here to keep constant time compare when looping over a number of claims
- result := false
-
- var stringClaims string
- for _, a := range aud {
- if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
- result = true
- }
- stringClaims = stringClaims + a
- }
-
- // case where "" is sent in one or many aud claims
- if len(stringClaims) == 0 {
- return !required
- }
-
- return result
-}
-
-func verifyExp(exp int64, now int64, required bool) bool {
- if exp == 0 {
- return !required
- }
- return now <= exp
-}
-
-func verifyIat(iat int64, now int64, required bool) bool {
- if iat == 0 {
- return !required
- }
- return now >= iat
-}
-
-func verifyIss(iss string, cmp string, required bool) bool {
- if iss == "" {
- return !required
- }
- if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
- return true
- } else {
- return false
- }
-}
-
-func verifyNbf(nbf int64, now int64, required bool) bool {
- if nbf == 0 {
- return !required
- }
- return now >= nbf
-}
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go
deleted file mode 100644
index 15e23435..00000000
--- a/vendor/github.com/golang-jwt/jwt/ecdsa.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rand"
- "errors"
- "math/big"
-)
-
-var (
- // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
- ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
-)
-
-// Implements the ECDSA family of signing methods signing methods
-// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
-type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
- CurveBits int
-}
-
-// Specific instances for EC256 and company
-var (
- SigningMethodES256 *SigningMethodECDSA
- SigningMethodES384 *SigningMethodECDSA
- SigningMethodES512 *SigningMethodECDSA
-)
-
-func init() {
- // ES256
- SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
- RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
- return SigningMethodES256
- })
-
- // ES384
- SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
- RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
- return SigningMethodES384
- })
-
- // ES512
- SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
- RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
- return SigningMethodES512
- })
-}
-
-func (m *SigningMethodECDSA) Alg() string {
- return m.Name
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an ecdsa.PublicKey struct
-func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- // Get the key
- var ecdsaKey *ecdsa.PublicKey
- switch k := key.(type) {
- case *ecdsa.PublicKey:
- ecdsaKey = k
- default:
- return ErrInvalidKeyType
- }
-
- if len(sig) != 2*m.KeySize {
- return ErrECDSAVerification
- }
-
- r := big.NewInt(0).SetBytes(sig[:m.KeySize])
- s := big.NewInt(0).SetBytes(sig[m.KeySize:])
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
- if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
- return nil
- }
-
- return ErrECDSAVerification
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an ecdsa.PrivateKey struct
-func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
- // Get the key
- var ecdsaKey *ecdsa.PrivateKey
- switch k := key.(type) {
- case *ecdsa.PrivateKey:
- ecdsaKey = k
- default:
- return "", ErrInvalidKeyType
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return r, s
- if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
- curveBits := ecdsaKey.Curve.Params().BitSize
-
- if m.CurveBits != curveBits {
- return "", ErrInvalidKey
- }
-
- keyBytes := curveBits / 8
- if curveBits%8 > 0 {
- keyBytes += 1
- }
-
- // We serialize the outputs (r and s) into big-endian byte arrays
- // padded with zeros on the left to make sure the sizes work out.
- // Output must be 2*keyBytes long.
- out := make([]byte, 2*keyBytes)
- r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
- s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
-
- return EncodeSegment(out), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
deleted file mode 100644
index db9f4be7..00000000
--- a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package jwt
-
-import (
- "crypto/ecdsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
- ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
-)
-
-// Parse PEM encoded Elliptic Curve Private Key Structure
-func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *ecdsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
- return nil, ErrNotECPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 public key
-func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *ecdsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
- return nil, ErrNotECPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go b/vendor/github.com/golang-jwt/jwt/ed25519.go
deleted file mode 100644
index a2f8ddbe..00000000
--- a/vendor/github.com/golang-jwt/jwt/ed25519.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package jwt
-
-import (
- "errors"
-
- "crypto/ed25519"
-)
-
-var (
- ErrEd25519Verification = errors.New("ed25519: verification error")
-)
-
-// Implements the EdDSA family
-// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
-type SigningMethodEd25519 struct{}
-
-// Specific instance for EdDSA
-var (
- SigningMethodEdDSA *SigningMethodEd25519
-)
-
-func init() {
- SigningMethodEdDSA = &SigningMethodEd25519{}
- RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
- return SigningMethodEdDSA
- })
-}
-
-func (m *SigningMethodEd25519) Alg() string {
- return "EdDSA"
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an ed25519.PublicKey
-func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
- var err error
- var ed25519Key ed25519.PublicKey
- var ok bool
-
- if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
- return ErrInvalidKeyType
- }
-
- if len(ed25519Key) != ed25519.PublicKeySize {
- return ErrInvalidKey
- }
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- // Verify the signature
- if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
- return ErrEd25519Verification
- }
-
- return nil
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an ed25519.PrivateKey
-func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
- var ed25519Key ed25519.PrivateKey
- var ok bool
-
- if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
- return "", ErrInvalidKeyType
- }
-
- // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
- // this allows to avoid recover usage
- if len(ed25519Key) != ed25519.PrivateKeySize {
- return "", ErrInvalidKey
- }
-
- // Sign the string and return the encoded result
- sig := ed25519.Sign(ed25519Key, []byte(signingString))
- return EncodeSegment(sig), nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
deleted file mode 100644
index c6357275..00000000
--- a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ed25519"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key")
-)
-
-// Parse PEM-encoded Edwards curve private key
-func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
-
- var pkey ed25519.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
- return nil, ErrNotEdPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM-encoded Edwards curve public key
-func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- return nil, err
- }
-
- var pkey ed25519.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
- return nil, ErrNotEdPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go
deleted file mode 100644
index 1c93024a..00000000
--- a/vendor/github.com/golang-jwt/jwt/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package jwt
-
-import (
- "errors"
-)
-
-// Error constants
-var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
-)
-
-// The errors that might occur when parsing and validating a token
-const (
- ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
- ValidationErrorUnverifiable // Token could not be verified because of signing problems
- ValidationErrorSignatureInvalid // Signature validation failed
-
- // Standard Claim validation errors
- ValidationErrorAudience // AUD validation failed
- ValidationErrorExpired // EXP validation failed
- ValidationErrorIssuedAt // IAT validation failed
- ValidationErrorIssuer // ISS validation failed
- ValidationErrorNotValidYet // NBF validation failed
- ValidationErrorId // JTI validation failed
- ValidationErrorClaimsInvalid // Generic claims validation error
-)
-
-// Helper for constructing a ValidationError with a string error message
-func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
- return &ValidationError{
- text: errorText,
- Errors: errorFlags,
- }
-}
-
-// The error from Parse if token is not valid
-type ValidationError struct {
- Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
- Errors uint32 // bitfield. see ValidationError... constants
- text string // errors that do not have a valid error just have text
-}
-
-// Validation error is an error type
-func (e ValidationError) Error() string {
- if e.Inner != nil {
- return e.Inner.Error()
- } else if e.text != "" {
- return e.text
- } else {
- return "token is invalid"
- }
-}
-
-// No errors
-func (e *ValidationError) valid() bool {
- return e.Errors == 0
-}
diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go
deleted file mode 100644
index addbe5d4..00000000
--- a/vendor/github.com/golang-jwt/jwt/hmac.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/hmac"
- "errors"
-)
-
-// Implements the HMAC-SHA family of signing methods signing methods
-// Expects key type of []byte for both signing and validation
-type SigningMethodHMAC struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for HS256 and company
-var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
- ErrSignatureInvalid = errors.New("signature is invalid")
-)
-
-func init() {
- // HS256
- SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
- return SigningMethodHS256
- })
-
- // HS384
- SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
- return SigningMethodHS384
- })
-
- // HS512
- SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
- return SigningMethodHS512
- })
-}
-
-func (m *SigningMethodHMAC) Alg() string {
- return m.Name
-}
-
-// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
-func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
- // Verify the key is the right type
- keyBytes, ok := key.([]byte)
- if !ok {
- return ErrInvalidKeyType
- }
-
- // Decode signature, for comparison
- sig, err := DecodeSegment(signature)
- if err != nil {
- return err
- }
-
- // Can we use the specified hashing method?
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
-
- // This signing method is symmetric, so we validate the signature
- // by reproducing the signature from the signing string and key, then
- // comparing that against the provided signature.
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
- if !hmac.Equal(sig, hasher.Sum(nil)) {
- return ErrSignatureInvalid
- }
-
- // No validation errors. Signature is good.
- return nil
-}
-
-// Implements the Sign method from SigningMethod for this signing method.
-// Key must be []byte
-func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
- if keyBytes, ok := key.([]byte); ok {
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
-
- return EncodeSegment(hasher.Sum(nil)), nil
- }
-
- return "", ErrInvalidKeyType
-}
diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go
deleted file mode 100644
index 72c79f92..00000000
--- a/vendor/github.com/golang-jwt/jwt/map_claims.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package jwt
-
-import (
- "encoding/json"
- "errors"
- // "fmt"
-)
-
-// Claims type that uses the map[string]interface{} for JSON decoding
-// This is the default claims type if you don't supply one
-type MapClaims map[string]interface{}
-
-// VerifyAudience Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
- var aud []string
- switch v := m["aud"].(type) {
- case string:
- aud = append(aud, v)
- case []string:
- aud = v
- case []interface{}:
- for _, a := range v {
- vs, ok := a.(string)
- if !ok {
- return false
- }
- aud = append(aud, vs)
- }
- }
- return verifyAud(aud, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- exp, ok := m["exp"]
- if !ok {
- return !req
- }
- switch expType := exp.(type) {
- case float64:
- return verifyExp(int64(expType), cmp, req)
- case json.Number:
- v, _ := expType.Int64()
- return verifyExp(v, cmp, req)
- }
- return false
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- iat, ok := m["iat"]
- if !ok {
- return !req
- }
- switch iatType := iat.(type) {
- case float64:
- return verifyIat(int64(iatType), cmp, req)
- case json.Number:
- v, _ := iatType.Int64()
- return verifyIat(v, cmp, req)
- }
- return false
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
- iss, _ := m["iss"].(string)
- return verifyIss(iss, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
- nbf, ok := m["nbf"]
- if !ok {
- return !req
- }
- switch nbfType := nbf.(type) {
- case float64:
- return verifyNbf(int64(nbfType), cmp, req)
- case json.Number:
- v, _ := nbfType.Int64()
- return verifyNbf(v, cmp, req)
- }
- return false
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// As well, if any of the above claims are not in the token, it will still
-// be considered a valid claim.
-func (m MapClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- if !m.VerifyExpiresAt(now, false) {
- vErr.Inner = errors.New("Token is expired")
- vErr.Errors |= ValidationErrorExpired
- }
-
- if !m.VerifyIssuedAt(now, false) {
- vErr.Inner = errors.New("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if !m.VerifyNotBefore(now, false) {
- vErr.Inner = errors.New("Token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/none.go
deleted file mode 100644
index f04d189d..00000000
--- a/vendor/github.com/golang-jwt/jwt/none.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package jwt
-
-// Implements the none signing method. This is required by the spec
-// but you probably should never use it.
-var SigningMethodNone *signingMethodNone
-
-const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
-
-var NoneSignatureTypeDisallowedError error
-
-type signingMethodNone struct{}
-type unsafeNoneMagicConstant string
-
-func init() {
- SigningMethodNone = &signingMethodNone{}
- NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
-
- RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
- return SigningMethodNone
- })
-}
-
-func (m *signingMethodNone) Alg() string {
- return "none"
-}
-
-// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
- // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
- // accepting 'none' signing method
- if _, ok := key.(unsafeNoneMagicConstant); !ok {
- return NoneSignatureTypeDisallowedError
- }
- // If signing method is none, signature must be an empty string
- if signature != "" {
- return NewValidationError(
- "'none' signing method with non-empty signature",
- ValidationErrorSignatureInvalid,
- )
- }
-
- // Accept 'none' signing method.
- return nil
-}
-
-// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
- if _, ok := key.(unsafeNoneMagicConstant); ok {
- return "", nil
- }
- return "", NoneSignatureTypeDisallowedError
-}
diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go
deleted file mode 100644
index d6901d9a..00000000
--- a/vendor/github.com/golang-jwt/jwt/parser.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package jwt
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "strings"
-)
-
-type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
- SkipClaimsValidation bool // Skip claims validation during token parsing
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
-}
-
-func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- token, parts, err := p.ParseUnverified(tokenString, claims)
- if err != nil {
- return token, err
- }
-
- // Verify signing method is in the required set
- if p.ValidMethods != nil {
- var signingMethodValid = false
- var alg = token.Method.Alg()
- for _, m := range p.ValidMethods {
- if m == alg {
- signingMethodValid = true
- break
- }
- }
- if !signingMethodValid {
- // signing method is not in the listed set
- return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
- }
- }
-
- // Lookup key
- var key interface{}
- if keyFunc == nil {
- // keyFunc was not provided. short circuiting validation
- return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
- }
- if key, err = keyFunc(token); err != nil {
- // keyFunc returned an error
- if ve, ok := err.(*ValidationError); ok {
- return token, ve
- }
- return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
- }
-
- vErr := &ValidationError{}
-
- // Validate Claims
- if !p.SkipClaimsValidation {
- if err := token.Claims.Valid(); err != nil {
-
- // If the Claims Valid returned an error, check if it is a validation error,
- // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
- if e, ok := err.(*ValidationError); !ok {
- vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
- } else {
- vErr = e
- }
- }
- }
-
- // Perform validation
- token.Signature = parts[2]
- if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
- vErr.Inner = err
- vErr.Errors |= ValidationErrorSignatureInvalid
- }
-
- if vErr.valid() {
- token.Valid = true
- return token, nil
- }
-
- return token, vErr
-}
-
-// WARNING: Don't use this method unless you know what you're doing
-//
-// This method parses the token but doesn't validate the signature. It's only
-// ever useful in cases where you know the signature is valid (because it has
-// been checked previously in the stack) and you want to extract values from
-// it.
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- parts = strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
- }
-
- token = &Token{Raw: tokenString}
-
- // parse Header
- var headerBytes []byte
- if headerBytes, err = DecodeSegment(parts[0]); err != nil {
- if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
- return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
- }
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // parse Claims
- var claimBytes []byte
- token.Claims = claims
-
- if claimBytes, err = DecodeSegment(parts[1]); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- if p.UseJSONNumber {
- dec.UseNumber()
- }
- // JSON Decode. Special case for map type to avoid weird pointer behavior
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
- } else {
- err = dec.Decode(&claims)
- }
- // Handle decode error
- if err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // Lookup signature method
- if method, ok := token.Header["alg"].(string); ok {
- if token.Method = GetSigningMethod(method); token.Method == nil {
- return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
- }
- } else {
- return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
- }
-
- return token, parts, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go
deleted file mode 100644
index e4caf1ca..00000000
--- a/vendor/github.com/golang-jwt/jwt/rsa.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// Implements the RSA family of signing methods signing methods
-// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
-type SigningMethodRSA struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for RS256 and company
-var (
- SigningMethodRS256 *SigningMethodRSA
- SigningMethodRS384 *SigningMethodRSA
- SigningMethodRS512 *SigningMethodRSA
-)
-
-func init() {
- // RS256
- SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
- return SigningMethodRS256
- })
-
- // RS384
- SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
- return SigningMethodRS384
- })
-
- // RS512
- SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
- return SigningMethodRS512
- })
-}
-
-func (m *SigningMethodRSA) Alg() string {
- return m.Name
-}
-
-// Implements the Verify method from SigningMethod
-// For this signing method, must be an *rsa.PublicKey structure.
-func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- var rsaKey *rsa.PublicKey
- var ok bool
-
- if rsaKey, ok = key.(*rsa.PublicKey); !ok {
- return ErrInvalidKeyType
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
- return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, must be an *rsa.PrivateKey structure.
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
- var rsaKey *rsa.PrivateKey
- var ok bool
-
- // Validate type of key
- if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
- return "", ErrInvalidKey
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
- return EncodeSegment(sigBytes), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go
deleted file mode 100644
index c0147086..00000000
--- a/vendor/github.com/golang-jwt/jwt/rsa_pss.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// +build go1.4
-
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// Implements the RSAPSS family of signing methods signing methods
-type SigningMethodRSAPSS struct {
- *SigningMethodRSA
- Options *rsa.PSSOptions
- // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS.
- // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow
- // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
- // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
- VerifyOptions *rsa.PSSOptions
-}
-
-// Specific instances for RS/PS and company.
-var (
- SigningMethodPS256 *SigningMethodRSAPSS
- SigningMethodPS384 *SigningMethodRSAPSS
- SigningMethodPS512 *SigningMethodRSAPSS
-)
-
-func init() {
- // PS256
- SigningMethodPS256 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS256",
- Hash: crypto.SHA256,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
- return SigningMethodPS256
- })
-
- // PS384
- SigningMethodPS384 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS384",
- Hash: crypto.SHA384,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
- return SigningMethodPS384
- })
-
- // PS512
- SigningMethodPS512 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS512",
- Hash: crypto.SHA512,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
- return SigningMethodPS512
- })
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an rsa.PublicKey struct
-func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- var rsaKey *rsa.PublicKey
- switch k := key.(type) {
- case *rsa.PublicKey:
- rsaKey = k
- default:
- return ErrInvalidKey
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- opts := m.Options
- if m.VerifyOptions != nil {
- opts = m.VerifyOptions
- }
-
- return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an rsa.PrivateKey struct
-func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
- var rsaKey *rsa.PrivateKey
-
- switch k := key.(type) {
- case *rsa.PrivateKey:
- rsaKey = k
- default:
- return "", ErrInvalidKeyType
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
- return EncodeSegment(sigBytes), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go
deleted file mode 100644
index 14c78c29..00000000
--- a/vendor/github.com/golang-jwt/jwt/rsa_utils.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package jwt
-
-import (
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
-)
-
-// Parse PEM encoded PKCS1 or PKCS8 private key
-func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
-
- var blockDecrypted []byte
- if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
- return nil, err
- }
-
- if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 public key
-func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *rsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, ErrNotRSAPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go
deleted file mode 100644
index ed1f212b..00000000
--- a/vendor/github.com/golang-jwt/jwt/signing_method.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package jwt
-
-import (
- "sync"
-)
-
-var signingMethods = map[string]func() SigningMethod{}
-var signingMethodLock = new(sync.RWMutex)
-
-// Implement SigningMethod to add new methods for signing or verifying tokens.
-type SigningMethod interface {
- Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
- Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
- Alg() string // returns the alg identifier for this method (example: 'HS256')
-}
-
-// Register the "alg" name and a factory function for signing method.
-// This is typically done during init() in the method's implementation
-func RegisterSigningMethod(alg string, f func() SigningMethod) {
- signingMethodLock.Lock()
- defer signingMethodLock.Unlock()
-
- signingMethods[alg] = f
-}
-
-// Get a signing method from an "alg" string
-func GetSigningMethod(alg string) (method SigningMethod) {
- signingMethodLock.RLock()
- defer signingMethodLock.RUnlock()
-
- if methodF, ok := signingMethods[alg]; ok {
- method = methodF()
- }
- return
-}
diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go
deleted file mode 100644
index 6b30ced1..00000000
--- a/vendor/github.com/golang-jwt/jwt/token.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jwt
-
-import (
- "encoding/base64"
- "encoding/json"
- "strings"
- "time"
-)
-
-// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
-// You can override it to use another time value. This is useful for testing or if your
-// server uses a different time zone than your tokens.
-var TimeFunc = time.Now
-
-// Parse methods use this callback function to supply
-// the key for verification. The function receives the parsed,
-// but unverified Token. This allows you to use properties in the
-// Header of the token (such as `kid`) to identify which key to use.
-type Keyfunc func(*Token) (interface{}, error)
-
-// A JWT Token. Different fields will be used depending on whether you're
-// creating or parsing/verifying a token.
-type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
-}
-
-// Create a new Token. Takes a signing method
-func New(method SigningMethod) *Token {
- return NewWithClaims(method, MapClaims{})
-}
-
-func NewWithClaims(method SigningMethod, claims Claims) *Token {
- return &Token{
- Header: map[string]interface{}{
- "typ": "JWT",
- "alg": method.Alg(),
- },
- Claims: claims,
- Method: method,
- }
-}
-
-// Get the complete, signed token
-func (t *Token) SignedString(key interface{}) (string, error) {
- var sig, sstr string
- var err error
- if sstr, err = t.SigningString(); err != nil {
- return "", err
- }
- if sig, err = t.Method.Sign(sstr, key); err != nil {
- return "", err
- }
- return strings.Join([]string{sstr, sig}, "."), nil
-}
-
-// Generate the signing string. This is the
-// most expensive part of the whole deal. Unless you
-// need this for something special, just go straight for
-// the SignedString.
-func (t *Token) SigningString() (string, error) {
- var err error
- parts := make([]string, 2)
- for i := range parts {
- var jsonValue []byte
- if i == 0 {
- if jsonValue, err = json.Marshal(t.Header); err != nil {
- return "", err
- }
- } else {
- if jsonValue, err = json.Marshal(t.Claims); err != nil {
- return "", err
- }
- }
-
- parts[i] = EncodeSegment(jsonValue)
- }
- return strings.Join(parts, "."), nil
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).Parse(tokenString, keyFunc)
-}
-
-func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
-}
-
-// Encode JWT specific base64url encoding with padding stripped
-func EncodeSegment(seg []byte) string {
- return base64.RawURLEncoding.EncodeToString(seg)
-}
-
-// Decode JWT specific base64url encoding with padding stripped
-func DecodeSegment(seg string) ([]byte, error) {
- return base64.RawURLEncoding.DecodeString(seg)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/.gitignore
rename to vendor/github.com/golang-jwt/jwt/v4/.gitignore
diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/LICENSE
rename to vendor/github.com/golang-jwt/jwt/v4/LICENSE
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..32966f59
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having
+troubles migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md
new file mode 100644
index 00000000..30f2f2a6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -0,0 +1,138 @@
+# jwt-go
+
+[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v4)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
+which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:***
+A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
new file mode 100644
index 00000000..b08402c3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
+
+You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first, this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
new file mode 100644
index 00000000..afbfc4e4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
@@ -0,0 +1,135 @@
+## `jwt-go` Version History
+
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
new file mode 100644
index 00000000..364cec87
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -0,0 +1,269 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// Claims must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+ Valid() error
+}
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c RegisteredClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := now.Sub(c.ExpiresAt.Time)
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = ErrTokenUsedBeforeIssued
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = ErrTokenNotValidYet
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud(c.Audience, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool {
+ if c.ExpiresAt == nil {
+ return verifyExp(nil, cmp, req)
+ }
+
+ return verifyExp(&c.ExpiresAt.Time, cmp, req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool {
+ if c.IssuedAt == nil {
+ return verifyIat(nil, cmp, req)
+ }
+
+ return verifyIat(&c.IssuedAt.Time, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool {
+ if c.NotBefore == nil {
+ return verifyNbf(nil, cmp, req)
+ }
+
+ return verifyNbf(&c.NotBefore.Time, cmp, req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// StandardClaims are a structured version of the JWT Claims Set, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the
+// specification exactly, since they were based on an earlier draft of the
+// specification and not updated. The main difference is that they only
+// support integer-based date fields and singular audiences. This might lead to
+// incompatibilities with other JWT implementations. The use of this is discouraged, instead
+// the newer RegisteredClaims struct should be used.
+//
+// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct.
+type StandardClaims struct {
+ Audience string `json:"aud,omitempty"`
+ ExpiresAt int64 `json:"exp,omitempty"`
+ Id string `json:"jti,omitempty"`
+ IssuedAt int64 `json:"iat,omitempty"`
+ Issuer string `json:"iss,omitempty"`
+ NotBefore int64 `json:"nbf,omitempty"`
+ Subject string `json:"sub,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+ vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = ErrTokenUsedBeforeIssued
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = ErrTokenNotValidYet
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud([]string{c.Audience}, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ if c.ExpiresAt == 0 {
+ return verifyExp(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.ExpiresAt, 0)
+ return verifyExp(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ if c.IssuedAt == 0 {
+ return verifyIat(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.IssuedAt, 0)
+ return verifyIat(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ if c.NotBefore == 0 {
+ return verifyNbf(nil, time.Unix(cmp, 0), req)
+ }
+
+ t := time.Unix(c.NotBefore, 0)
+ return verifyNbf(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud []string, cmp string, required bool) bool {
+ if len(aud) == 0 {
+ return !required
+ }
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if len(stringClaims) == 0 {
+ return !required
+ }
+
+ return result
+}
+
+func verifyExp(exp *time.Time, now time.Time, required bool) bool {
+ if exp == nil {
+ return !required
+ }
+ return now.Before(*exp)
+}
+
+func verifyIat(iat *time.Time, now time.Time, required bool) bool {
+ if iat == nil {
+ return !required
+ }
+ return now.After(*iat) || now.Equal(*iat)
+}
+
+func verifyNbf(nbf *time.Time, now time.Time, required bool) bool {
+ if nbf == nil {
+ return !required
+ }
+ return now.After(*nbf) || now.Equal(*nbf)
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go
similarity index 100%
rename from vendor/github.com/golang-jwt/jwt/doc.go
rename to vendor/github.com/golang-jwt/jwt/v4/doc.go
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
new file mode 100644
index 00000000..eac023fc
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go
@@ -0,0 +1,142 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return EncodeSegment(out), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
new file mode 100644
index 00000000..5700636d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
new file mode 100644
index 00000000..07d3aacd
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go
@@ -0,0 +1,85 @@
+package jwt
+
+import (
+ "errors"
+
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+ var err error
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return "", ErrInvalidKeyType
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return "", ErrInvalidKey
+ }
+
+ // Sign the string and return the encoded result
+ // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return "", err
+ }
+ return EncodeSegment(sig), nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
new file mode 100644
index 00000000..cdb5e68e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go
new file mode 100644
index 00000000..10ac8835
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go
@@ -0,0 +1,112 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// ValidationError represents an error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Error is the implementation of the err interface.
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// Unwrap gives errors.Is and errors.As access to the inner error.
+func (e *ValidationError) Unwrap() error {
+ return e.Inner
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
+
+// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message
+// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use
+// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
+func (e *ValidationError) Is(err error) bool {
+ // Check, if our inner error is a direct match
+ if errors.Is(errors.Unwrap(e), err) {
+ return true
+ }
+
+ // Otherwise, we need to match using our error flags
+ switch err {
+ case ErrTokenMalformed:
+ return e.Errors&ValidationErrorMalformed != 0
+ case ErrTokenUnverifiable:
+ return e.Errors&ValidationErrorUnverifiable != 0
+ case ErrTokenSignatureInvalid:
+ return e.Errors&ValidationErrorSignatureInvalid != 0
+ case ErrTokenInvalidAudience:
+ return e.Errors&ValidationErrorAudience != 0
+ case ErrTokenExpired:
+ return e.Errors&ValidationErrorExpired != 0
+ case ErrTokenUsedBeforeIssued:
+ return e.Errors&ValidationErrorIssuedAt != 0
+ case ErrTokenInvalidIssuer:
+ return e.Errors&ValidationErrorIssuer != 0
+ case ErrTokenNotValidYet:
+ return e.Errors&ValidationErrorNotValidYet != 0
+ case ErrTokenInvalidId:
+ return e.Errors&ValidationErrorId != 0
+ case ErrTokenInvalidClaims:
+ return e.Errors&ValidationErrorClaimsInvalid != 0
+ }
+
+ return false
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
new file mode 100644
index 00000000..011f68a2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Decode signature, for comparison
+ sig, err := DecodeSegment(signature)
+ if err != nil {
+ return err
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return EncodeSegment(hasher.Sum(nil)), nil
+ }
+
+ return "", ErrInvalidKeyType
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
new file mode 100644
index 00000000..2700d64a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -0,0 +1,151 @@
+package jwt
+
+import (
+ "encoding/json"
+ "errors"
+ "time"
+ // "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// VerifyAudience Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+ var aud []string
+ switch v := m["aud"].(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return false
+ }
+ aud = append(aud, vs)
+ }
+ }
+ return verifyAud(aud, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp).
+// If req is false, it will return true, if exp is unset.
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["exp"]
+ if !ok {
+ return !req
+ }
+
+ switch exp := v.(type) {
+ case float64:
+ if exp == 0 {
+ return verifyExp(nil, cmpTime, req)
+ }
+
+ return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req)
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+ }
+
+ return false
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["iat"]
+ if !ok {
+ return !req
+ }
+
+ switch iat := v.(type) {
+ case float64:
+ if iat == 0 {
+ return verifyIat(nil, cmpTime, req)
+ }
+
+ return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req)
+ case json.Number:
+ v, _ := iat.Float64()
+
+ return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+ }
+
+ return false
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ cmpTime := time.Unix(cmp, 0)
+
+ v, ok := m["nbf"]
+ if !ok {
+ return !req
+ }
+
+ switch nbf := v.(type) {
+ case float64:
+ if nbf == 0 {
+ return verifyNbf(nil, cmpTime, req)
+ }
+
+ return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req)
+ case json.Number:
+ v, _ := nbf.Float64()
+
+ return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+ }
+
+ return false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+ iss, _ := m["iss"].(string)
+ return verifyIss(iss, cmp, req)
+}
+
+// Valid validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ if !m.VerifyExpiresAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenExpired
+ vErr.Inner = errors.New("Token is expired")
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !m.VerifyIssuedAt(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
+ vErr.Inner = errors.New("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !m.VerifyNotBefore(now, false) {
+ // TODO(oxisto): this should be replaced with ErrTokenNotValidYet
+ vErr.Inner = errors.New("Token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go
new file mode 100644
index 00000000..f19835d2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if signature != "" {
+ return NewValidationError(
+ "'none' signing method with non-empty signature",
+ ValidationErrorSignatureInvalid,
+ )
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return "", nil
+ }
+ return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go
new file mode 100644
index 00000000..0fc510a0
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go
@@ -0,0 +1,206 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const tokenDelimiter = "."
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ ValidMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ UseJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ //
+ // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+ SkipClaimsValidation bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{}
+
+ // loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will
+// receive the parsed token and should return the key for validating.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object
+// implementing the Claims interface. This provides default values which can be overridden and
+// allows a caller to use their own type, rather than the default MapClaims implementation of
+// Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such
+// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or
+// b) if you are using a pointer, allocate the proper memory for it before passing in the overall
+// claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.ValidMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.ValidMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ // keyFunc returned an error
+ if ve, ok := err.(*ValidationError); ok {
+ return token, ve
+ }
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+ }
+
+ // Perform validation
+ token.Signature = parts[2]
+ if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid}
+ }
+
+ vErr := &ValidationError{}
+
+ // Validate Claims
+ if !p.SkipClaimsValidation {
+ if err := token.Claims.Valid(); err != nil {
+ // If the Claims Valid returned an error, check if it is a validation error,
+ // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
+ return token, vErr
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+ // causing unnecessary overhead parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
new file mode 100644
index 00000000..6ea6f952
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go
@@ -0,0 +1,29 @@
+package jwt
+
+// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add
+// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that
+// takes a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid.
+// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.ValidMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.UseJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you exactly know
+// what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.SkipClaimsValidation = true
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
new file mode 100644
index 00000000..b910b19c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return "", ErrInvalidKey
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
new file mode 100644
index 00000000..4fd6f9e6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -0,0 +1,143 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+ // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return ErrInvalidKey
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
new file mode 100644
index 00000000..1966c450
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go
@@ -0,0 +1,105 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
new file mode 100644
index 00000000..241ae9c6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
@@ -0,0 +1,46 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+ Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
new file mode 100644
index 00000000..53745d51
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
new file mode 100644
index 00000000..786b275c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -0,0 +1,143 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515
+// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
+// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
+// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+// To use the non-recommended decoding, set this boolean to `true` prior to using this package.
+var DecodePaddingAllowed bool
+
+// DecodeStrict will switch the codec used for decoding JWTs into strict mode.
+// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5.
+// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+// To use strict decoding, set this boolean to `true` prior to using this package.
+var DecodeStrict bool
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// New creates a new Token with the specified signing method and an empty map of claims.
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+// NewWithClaims creates a new Token with the specified signing method and claims.
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT.
+// The token is signed using the SigningMethod specified in the token.
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ var jsonValue []byte
+
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ header := EncodeSegment(jsonValue)
+
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ claim := EncodeSegment(jsonValue)
+
+ return strings.Join([]string{header, claim}, "."), nil
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature.
+// The caller is strongly encouraged to set the WithValidMethods option to
+// validate the 'alg' claim in the token matches the expected algorithm.
+// For more details about the importance of validating the 'alg' claim,
+// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if DecodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if DecodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go
new file mode 100644
index 00000000..ac8e140e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -0,0 +1,145 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library.
+// This has an influence on the precision of times when comparing expiry or
+// other related time fields. Furthermore, it is also the precision of times
+// when serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially
+// its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the behaviour
+// of the underlying []string. If it is set to false, it will serialize to a single
+// string, if it contains one element. Otherwise, it will serialize to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get round the issue:
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializes a
+// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch
+// with either integer or non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericData: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string.
+// This type is necessary, since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value interface{}
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []interface{}:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field,
+ // only contains one element, it MAY be serialized as a single string. This may or may not be
+ // desired based on the ecosystem of other JWT library used, so we make it configurable by the
+ // variable MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
new file mode 100644
index 00000000..09573e01
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
new file mode 100644
index 00000000..35dbc252
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..b3178e75
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -0,0 +1,195 @@
+# Migration Guide (v5.0.0)
+
+Version `v5` contains a major rework of core functionalities in the `jwt-go`
+library. This includes support for several validation options as well as a
+re-design of the `Claims` interface. Lastly, we reworked how errors work under
+the hood, which should provide a better overall developer experience.
+
+Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v5"
+
+For most users, changing the import path *should* suffice. However, since we
+intentionally changed and cleaned some of the public API, existing programs
+might need to be updated. The following sections describe significant changes
+and corresponding updates for existing programs.
+
+## Parsing and Validation Options
+
+Under the hood, a new `Validator` struct takes care of validating the claims. A
+long awaited feature has been the option to fine-tune the validation of tokens.
+This is now possible with several `ParserOption` functions that can be appended
+to most `Parse` functions, such as `ParseWithClaims`. The most important options
+and changes are:
+ * Added `WithLeeway` to support specifying the leeway that is allowed when
+ validating time-based claims, such as `exp` or `nbf`.
+ * Changed default behavior to not check the `iat` claim. Usage of this claim
+ is OPTIONAL according to the JWT RFC. The claim itself is also purely
+ informational according to the RFC, so a strict validation failure is not
+ recommended. If you want to check for sensible values in these claims,
+ please use the `WithIssuedAt` parser option.
+ * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
+ expected `aud`, `sub` and `iss`.
+ * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
+ previously global settings to enable base64 strict encoding and the parsing
+ of base64 strings with padding. The latter is strictly speaking against the
+ standard, but unfortunately some of the major identity providers issue some
+ of these incorrect tokens. Both options are disabled by default.
+
+## Changes to the `Claims` interface
+
+### Complete Restructuring
+
+Previously, the claims interface was satisfied with an implementation of a
+`Valid() error` function. This had several issues:
+ * The different claim types (struct claims, map claims, etc.) then contained
+ similar (but not 100 % identical) code of how this validation was done. This
+ lead to a lot of (almost) duplicate code and was hard to maintain
+ * It was not really semantically close to what a "claim" (or a set of claims)
+ really is; which is a list of defined key/value pairs with a certain
+ semantic meaning.
+
+Since all the validation functionality is now extracted into the validator, all
+`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
+Instead, the interface now represents a list of getters to retrieve values with
+a specific meaning. This allows us to completely decouple the validation logic
+with the underlying storage representation of the claim, which could be a
+struct, a map or even something stored in a database.
+
+```go
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
+```
+
+Users that previously directly called the `Valid` function on their claims,
+e.g., to perform validation independently of parsing/verifying a token, can now
+use the `jwt.NewValidator` function to create a `Validator` independently of the
+`Parser`.
+
+```go
+var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
+v.Validate(myClaims)
+```
+
+### Supported Claim Types and Removal of `StandardClaims`
+
+The two standard claim types supported by this library, `MapClaims` and
+`RegisteredClaims` both implement the necessary functions of this interface. The
+old `StandardClaims` struct, which has already been deprecated in `v4` is now
+removed.
+
+Users using custom claims, in most cases, will not experience any changes in the
+behavior as long as they embedded `RegisteredClaims`. If they created a new
+claim type from scratch, they now need to implemented the proper getter
+functions.
+
+### Migrating Application Specific Logic of the old `Valid`
+
+Previously, users could override the `Valid` method in a custom claim, for
+example to extend the validation with application-specific claims. However, this
+was always very dangerous, since once could easily disable the standard
+validation and signature checking.
+
+In order to avoid that, while still supporting the use-case, a new
+`ClaimsValidator` interface has been introduced. This interface consists of the
+`Validate() error` function. If the validator sees, that a `Claims` struct
+implements this interface, the errors returned to the `Validate` function will
+be *appended* to the regular standard validation. It is not possible to disable
+the standard validation anymore (even only by accident).
+
+Usage examples can be found in [example_test.go](./example_test.go), to build
+claims structs like the following.
+
+```go
+// MyCustomClaims includes all registered claims, plus Foo.
+type MyCustomClaims struct {
+ Foo string `json:"foo"`
+ jwt.RegisteredClaims
+}
+
+// Validate can be used to execute additional application-specific claims
+// validation.
+func (m MyCustomClaims) Validate() error {
+ if m.Foo != "bar" {
+ return errors.New("must be foobar")
+ }
+
+ return nil
+}
+```
+
+## Changes to the `Token` and `Parser` struct
+
+The previously global functions `DecodeSegment` and `EncodeSegment` were moved
+to the `Parser` and `Token` struct respectively. This will allow us in the
+future to configure the behavior of these two based on options supplied on the
+parser or the token (creation). This also removes two previously global
+variables and moves them to parser options `WithStrictDecoding` and
+`WithPaddingAllowed`.
+
+In order to do that, we had to adjust the way signing methods work. Previously
+they were given a base64 encoded signature in `Verify` and were expected to
+return a base64 encoded version of the signature in `Sign`, both as a `string`.
+However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
+global and was a less than perfect design because we were repeating
+encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
+operate on a decoded signature as a `[]byte`, which feels more natural for a
+cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
+the final encoding/decoding part.
+
+In addition to that, we also changed the `Signature` field on `Token` from a
+`string` to `[]byte` and this is also now populated with the decoded form. This
+is also more consistent, because the other parts of the JWT, mainly `Header` and
+`Claims` were already stored in decoded form in `Token`. Only the signature was
+stored in base64 encoded form, which was redundant with the information in the
+`Raw` field, which contains the complete token as base64.
+
+```go
+type Token struct {
+ Raw string // Raw contains the raw token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]any // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form
+ Valid bool // Valid specifies if the token is valid
+}
+```
+
+Most (if not all) of these changes should not impact the normal usage of this
+library. Only users directly accessing the `Signature` field as well as
+developers of custom signing methods should be affected.
+
+# Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
+this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
+be a drop-in replacement, if you're having troubles migrating, please open an
+issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+# Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at
+https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
new file mode 100644
index 00000000..0bb636f2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -0,0 +1,167 @@
+# jwt-go
+
+[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+[](https://coveralls.io/github/golang-jwt/jwt?branch=main)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
+implementation of [JSON Web
+Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
+this project adds Go module support, but maintains backward compatibility with
+older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
+[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
+v5.0.0 introduces major improvements to the validation of tokens, but is not
+entirely backward compatible.
+
+> After the original author of the library suggested migrating the maintenance
+> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
+> existing library into this repository. See
+> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
+> detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the
+crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue
+[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
+detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is
+what you
+expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
+This library attempts to make it easy to do the right thing by requiring key
+types to match the expected alg, but you should take the extra step to verify it in
+your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release
+policy](https://golang.org/doc/devel/release#policy). So we will support a major
+version of Go until there are two newer major releases. We no longer support
+building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities that will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
+Tokens.
+
+In short, it's a signed JSON object that does something useful (for example,
+authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is
+made of three parts, separated by `.`'s. The first two parts are JSON objects,
+that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
+encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for
+verifying the last part, the signature. For example, which encryption method
+was used for signing and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and
+contains the actual stuff you care about. Refer to [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
+reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and
+signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA,
+RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have
+ [Go](https://go.dev/doc/install) installed, then you can use the command
+ below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v5
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v5"
+```
+
+## Usage
+
+A detailed usage guide, including how to sign and verify tokens can be found on
+our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+for examples of usage:
+
+* [Simple example of parsing and validating a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
+* [Simple example of building and signing a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
+* [Directory of
+ Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
+
+## Compliance
+
+This library was last reviewed to comply with [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
+notable differences:
+
+* In order to protect against accidental use of [Unsecured
+ JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
+ `alg=none` will only be accepted if the constant
+ `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are
+appreciated. The API should be considered stable. There should be very few
+backward-incompatible changes outside of major version updates (and only with
+good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
+requests will land on `main`. Periodically, versions will be tagged from
+`main`. You can find all the releases on [the project releases
+page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
+your code.
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing
+methods or key functions. Simply implement the `SigningMethod` interface and
+register a factory method using `RegisterSigningMethod` or provide a
+`jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature
+providers, like key management services from various cloud providers or Hardware
+Security Modules (HSMs) or to implement additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by
+third parties and should not be considered as a primary offer by any of the
+mentioned cloud providers
+
+## More
+
+Go package documentation can be found [on
+pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
+documentation can be found on [our project
+page](https://golang-jwt.github.io/jwt/).
+
+The command line utility included in this project (cmd/jwt) provides a
+straightforward example of token creation and parsing as well as a useful tool
+for debugging your own integration. You'll also find several implementation
+examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
+of the JWT logo, which is distributed under the terms of the [MIT
+License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
new file mode 100644
index 00000000..2740597f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try be explicit, describe steps to reproduce the security issue with code example(s).
+
+You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first, this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
new file mode 100644
index 00000000..b5039e49
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
@@ -0,0 +1,137 @@
+# `jwt-go` Version History
+
+The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases.
+
+## 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+## 3.2.2
+
+* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+## 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+## 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+## 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+## 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
new file mode 100644
index 00000000..d50ff3da
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go
@@ -0,0 +1,16 @@
+package jwt
+
+// Claims represent any form of a JWT Claims Set according to
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
+// common basis for validation, it is required that an implementation is able to
+// supply at least the claim names provided in
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
+// `iat`, `nbf`, `iss`, `sub` and `aud`.
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
new file mode 100644
index 00000000..a86dc1a3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
new file mode 100644
index 00000000..06cd94d2
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key any) error {
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key any) ([]byte, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return nil, ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return out, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
new file mode 100644
index 00000000..44a3b7a1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
new file mode 100644
index 00000000..4159e57b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -0,0 +1,79 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key any) error {
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key any) ([]byte, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Sign the string and return the result. ed25519 performs a two-pass hash
+ // as part of its algorithm. Therefore, we need to pass a non-prehashed
+ // message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return sig, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
new file mode 100644
index 00000000..6f46e886
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
new file mode 100644
index 00000000..14e00751
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -0,0 +1,89 @@
+package jwt
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+ ErrInvalidType = errors.New("invalid type for claim")
+)
+
+// joinedError is an error type that works similar to what [errors.Join]
+// produces, with the exception that it has a nice error string; mainly its
+// error messages are concatenated using a comma, rather than a newline.
+type joinedError struct {
+ errs []error
+}
+
+func (je joinedError) Error() string {
+ msg := []string{}
+ for _, err := range je.errs {
+ msg = append(msg, err.Error())
+ }
+
+ return strings.Join(msg, ", ")
+}
+
+// joinErrors joins together multiple errors. Useful for scenarios where
+// multiple errors next to each other occur, e.g., in claims validation.
+func joinErrors(errs ...error) error {
+ return &joinedError{
+ errs: errs,
+ }
+}
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error message with a detailed error message. The
+// message will be prefixed with the contents of the supplied error type.
+// Additionally, more errors, that provide more context can be supplied which
+// will be appended to the message. This makes use of Go 1.20's possibility to
+// include more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
new file mode 100644
index 00000000..1bef138c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if
+// the signature is valid. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key any) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return newError("HMAC verify expects []byte", ErrInvalidKeyType)
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
+func (m *SigningMethodHMAC) Sign(signingString string, key any) ([]byte, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return hasher.Sum(nil), nil
+ }
+
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
new file mode 100644
index 00000000..3b920527
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -0,0 +1,109 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]any for JSON
+// decoding. This is the default claims type if you don't supply one
+type MapClaims map[string]any
+
+// GetExpirationTime implements the Claims interface.
+func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+ return m.parseNumericDate("exp")
+}
+
+// GetNotBefore implements the Claims interface.
+func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+ return m.parseNumericDate("nbf")
+}
+
+// GetIssuedAt implements the Claims interface.
+func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+ return m.parseNumericDate("iat")
+}
+
+// GetAudience implements the Claims interface.
+func (m MapClaims) GetAudience() (ClaimStrings, error) {
+ return m.parseClaimsString("aud")
+}
+
+// GetIssuer implements the Claims interface.
+func (m MapClaims) GetIssuer() (string, error) {
+ return m.parseString("iss")
+}
+
+// GetSubject implements the Claims interface.
+func (m MapClaims) GetSubject() (string, error) {
+ return m.parseString("sub")
+}
+
+// parseNumericDate tries to parse a key in the map claims type as a number
+// date. This will succeed, if the underlying type is either a [float64] or a
+// [json.Number]. Otherwise, nil will be returned.
+func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+ v, ok := m[key]
+ if !ok {
+ return nil, nil
+ }
+
+ switch exp := v.(type) {
+ case float64:
+ if exp == 0 {
+ return nil, nil
+ }
+
+ return newNumericDateFromSeconds(exp), nil
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return newNumericDateFromSeconds(v), nil
+ }
+
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+}
+
+// parseClaimsString tries to parse a key in the map claims type as a
+// [ClaimsStrings] type, which can either be a string or an array of string.
+func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+ var cs []string
+ switch v := m[key].(type) {
+ case string:
+ cs = append(cs, v)
+ case []string:
+ cs = v
+ case []any:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+ cs = append(cs, vs)
+ }
+ }
+
+ return cs, nil
+}
+
+// parseString tries to parse a key in the map claims type as a [string] type.
+// If the key does not exist, an empty string is returned. If the key has the
+// wrong type, an error is returned.
+func (m MapClaims) parseString(key string) (string, error) {
+ var (
+ ok bool
+ raw any
+ iss string
+ )
+ raw, ok = m[key]
+ if !ok {
+ return "", nil
+ }
+
+ iss, ok = raw.(string)
+ if !ok {
+ return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+
+ return iss, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
new file mode 100644
index 00000000..624ad55e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -0,0 +1,50 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key any) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if len(sig) != 0 {
+ return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key any) ([]byte, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return []byte{}, nil
+ }
+
+ return nil, NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
new file mode 100644
index 00000000..054c7eb6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -0,0 +1,268 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const tokenDelimiter = "."
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ validMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ useJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ skipClaimsValidation bool
+
+ validator *Validator
+
+ decodeStrict bool
+
+ decodePaddingAllowed bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{
+ validator: &Validator{},
+ }
+
+ // Loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for validating.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+// than the default MapClaims implementation of Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.validMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.validMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+ }
+ }
+
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+ text := strings.Join(parts[0:2], ".")
+
+ // Lookup key(s)
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+ }
+
+ got, err := keyFunc(token)
+ if err != nil {
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+ }
+
+ switch have := got.(type) {
+ case VerificationKeySet:
+ if len(have.Keys) == 0 {
+ return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
+ }
+ // Iterate through keys and verify signature, skipping the rest when a match is found.
+ // Return the last error if no match is found.
+ for _, key := range have.Keys {
+ if err = token.Method.Verify(text, token.Signature, key); err == nil {
+ break
+ }
+ }
+ default:
+ err = token.Method.Verify(text, token.Signature, have)
+ }
+ if err != nil {
+ return token, newError("", ErrTokenSignatureInvalid, err)
+ }
+
+ // Validate Claims
+ if !p.skipClaimsValidation {
+ // Make sure we have at least a default validator
+ if p.validator == nil {
+ p.validator = NewValidator()
+ }
+
+ if err := p.validator.Validate(claims); err != nil {
+ return token, newError("", ErrTokenInvalidClaims, err)
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (since it has already
+// been or will be checked elsewhere in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+ return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+ }
+
+ // parse Claims
+ token.Claims = claims
+
+ claimBytes, err := p.DecodeSegment(parts[1])
+ if err != nil {
+ return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+ }
+
+ // If `useJSONNumber` is enabled then we must use *json.Decoder to decode
+ // the claims. However, this comes with a performance penalty so only use
+	// it if we must and, otherwise, simply use json.Unmarshal.
+ if !p.useJSONNumber {
+ // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = json.Unmarshal(claimBytes, &c)
+ } else {
+ err = json.Unmarshal(claimBytes, &claims)
+ }
+ } else {
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ dec.UseNumber()
+ // JSON Decode. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ }
+ if err != nil {
+ return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+ }
+ } else {
+ return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+// causing unnecessary overhead parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding. This function will
+// take into account whether the [Parser] is configured with additional options,
+// such as [WithStrictDecoding] or [WithPaddingAllowed].
+func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if p.decodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if p.decodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature. The caller is strongly encouraged to set the
+// WithValidMethods option to validate the 'alg' claim in the token matches the
+// expected algorithm. For more details about the importance of validating the
+// 'alg' claim, see
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the
+// standard claims (such as RegisteredClaims), make sure that a) you either
+// embed a non-pointer version of the claims or b) if you are using a pointer,
+// allocate the proper memory for it before passing in the overall claims,
+// otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
new file mode 100644
index 00000000..43157355
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -0,0 +1,145 @@
+package jwt
+
+import "time"
+
+// ParserOption is used to implement functional-style options that modify the
+// behavior of the parser. To add new options, just create a function (ideally
+// beginning with With or Without) that returns an anonymous function that takes
+// a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser
+// will check. Only those methods will be considered valid. It is heavily
+// encouraged to use this option in order to prevent attacks such as
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.validMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with
+// UseNumber.
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.useJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This
+// option should only be used if you exactly know what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.skipClaimsValidation = true
+ }
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(leeway time.Duration) ParserOption {
+ return func(p *Parser) {
+ p.validator.leeway = leeway
+ }
+}
+
+// WithTimeFunc returns the ParserOption for specifying the time func. The
+// primary use-case for this is testing. If you are looking for a way to account
+// for clock-skew, WithLeeway should be used instead.
+func WithTimeFunc(f func() time.Time) ParserOption {
+ return func(p *Parser) {
+ p.validator.timeFunc = f
+ }
+}
+
+// WithIssuedAt returns the ParserOption to enable verification
+// of issued-at.
+func WithIssuedAt() ParserOption {
+ return func(p *Parser) {
+ p.validator.verifyIat = true
+ }
+}
+
+// WithExpirationRequired returns the ParserOption to make exp claim required.
+// By default exp claim is optional.
+func WithExpirationRequired() ParserOption {
+ return func(p *Parser) {
+ p.validator.requireExp = true
+ }
+}
+
+// WithAudience configures the validator to require any of the specified
+// audiences in the `aud` claim. Validation will fail if the audience is not
+// listed in the token or the `aud` claim is missing.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if an audience is expected.
+func WithAudience(aud ...string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ }
+}
+
+// WithAllAudiences configures the validator to require all the specified
+// audiences in the `aud` claim. Validation will fail if the specified audiences
+// are not listed in the token or the `aud` claim is missing. Duplicates within
+// the list are de-duplicated since internally, we use a map to look up the
+// audiences.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if an audience is expected.
+func WithAllAudiences(aud ...string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ p.validator.expectAllAud = true
+ }
+}
+
+// WithIssuer configures the validator to require the specified issuer in the
+// `iss` claim. Validation will fail if a different issuer is specified in the
+// token or the `iss` claim is missing.
+//
+// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if an issuer is expected.
+func WithIssuer(iss string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedIss = iss
+ }
+}
+
+// WithSubject configures the validator to require the specified subject in the
+// `sub` claim. Validation will fail if a different subject is specified in the
+// token or the `sub` claim is missing.
+//
+// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure application, we decided to REQUIRE the existence of the claim,
+// if a subject is expected.
+func WithSubject(sub string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedSub = sub
+ }
+}
+
+// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
+// padding. Note that the JWS RFC7515 states that the tokens will utilize a
+// Base64url encoding with no padding. Unfortunately, some implementations of
+// JWT are producing non-standard tokens, and thus require support for decoding.
+func WithPaddingAllowed() ParserOption {
+ return func(p *Parser) {
+ p.decodePaddingAllowed = true
+ }
+}
+
+// WithStrictDecoding will switch the codec used for decoding JWTs into strict
+// mode. In this mode, the decoder requires that trailing padding bits are zero,
+// as described in RFC 4648 section 3.5.
+func WithStrictDecoding() ParserOption {
+ return func(p *Parser) {
+ p.decodeStrict = true
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
new file mode 100644
index 00000000..77951a53
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
@@ -0,0 +1,63 @@
+package jwt
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use-case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// GetExpirationTime implements the Claims interface.
+func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
+ return c.ExpiresAt, nil
+}
+
+// GetNotBefore implements the Claims interface.
+func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
+ return c.NotBefore, nil
+}
+
+// GetIssuedAt implements the Claims interface.
+func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
+ return c.IssuedAt, nil
+}
+
+// GetAudience implements the Claims interface.
+func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
+ return c.Audience, nil
+}
+
+// GetIssuer implements the Claims interface.
+func (c RegisteredClaims) GetIssuer() (string, error) {
+ return c.Issuer, nil
+}
+
+// GetSubject implements the Claims interface.
+func (c RegisteredClaims) GetSubject() (string, error) {
+ return c.Subject, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
new file mode 100644
index 00000000..98b960a7
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key any) error {
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key any) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
new file mode 100644
index 00000000..f17590cc
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -0,0 +1,132 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+ // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key any) error {
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key any) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
new file mode 100644
index 00000000..f22c3d06
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -0,0 +1,107 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey any
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey any
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
new file mode 100644
index 00000000..096d0ed4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens. It
+// takes a decoded signature as an input in the Verify function and produces a
+// signature in Sign. The signature is then usually base64 encoded as part of a
+// JWT.
+type SigningMethod interface {
+ Verify(signingString string, sig []byte, key any) error // Returns nil if signature is valid
+ Sign(signingString string, key any) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
new file mode 100644
index 00000000..53745d51
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
new file mode 100644
index 00000000..3f715588
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed, but unverified
+// Token. This allows you to use properties in the Header of the token (such as
+// `kid`) to identify which key to use.
+//
+// The returned any may be a single key or a VerificationKeySet containing
+// multiple keys.
+type Keyfunc func(*Token) (any, error)
+
+// VerificationKey represents a public or secret key for verifying a token's signature.
+type VerificationKey interface {
+ crypto.PublicKey | []uint8
+}
+
+// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
+type VerificationKeySet struct {
+ Keys []VerificationKey
+}
+
+// Token represents a JWT Token. Different fields will be used depending on
+// whether you're creating or parsing/verifying a token.
+type Token struct {
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]any // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+}
+
+// New creates a new [Token] with the specified signing method and an empty map
+// of claims. Additional options can be specified, but are currently unused.
+func New(method SigningMethod, opts ...TokenOption) *Token {
+ return NewWithClaims(method, MapClaims{}, opts...)
+}
+
+// NewWithClaims creates a new [Token] with the specified signing method and
+// claims. Additional options can be specified, but are currently unused.
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+ return &Token{
+ Header: map[string]any{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT. The token is signed
+// using the SigningMethod specified in the token. Please refer to
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+// for an overview of the different signing methods and their respective key
+// types.
+func (t *Token) SignedString(key any) (string, error) {
+ sstr, err := t.SigningString()
+ if err != nil {
+ return "", err
+ }
+
+ sig, err := t.Method.Sign(sstr, key)
+ if err != nil {
+ return "", err
+ }
+
+ return sstr + "." + t.EncodeSegment(sig), nil
+}
+
+// SigningString generates the signing string. This is the most expensive part
+// of the whole deal. Unless you need this for something special, just go
+// straight for the SignedString.
+func (t *Token) SigningString() (string, error) {
+ h, err := json.Marshal(t.Header)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := json.Marshal(t.Claims)
+ if err != nil {
+ return "", err
+ }
+
+ return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding
+// stripped. In the future, this function might take into account a
+// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+// than a global function.
+func (*Token) EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
new file mode 100644
index 00000000..b4ae3bad
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
@@ -0,0 +1,5 @@
+package jwt
+
+// TokenOption is a reserved type, which provides some forward compatibility,
+// if we ever want to introduce token creation-related options.
+type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
new file mode 100644
index 00000000..a3e0ef12
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -0,0 +1,149 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library. This
+// has an influence on the precision of times when comparing expiry or other
+// related time fields. Furthermore, it is also the precision of times when
+// serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+// especially its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the
+// behavior of the underlying []string. If it is set to false, it will serialize
+// to a single string, if it contains one element. Otherwise, it will serialize
+// to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get round the issue:
+ //
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.RawMessage interface and
+// deserializes a [NumericDate] from a JSON representation, i.e. a
+// [json.Number]. This number represents an UNIX epoch with either integer or
+// non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericData: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be either
+// serialized from a string array or just a string. This type is necessary,
+// since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value any
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []any:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return ErrInvalidType
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return ErrInvalidType
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g.
+ // used by the "aud" field, only contains one element, it MAY be serialized
+ // as a single string. This may or may not be desired based on the ecosystem
+ // of other JWT library used, so we make it configurable by the variable
+ // MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
new file mode 100644
index 00000000..92b5c057
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -0,0 +1,326 @@
+package jwt
+
+import (
+ "fmt"
+ "slices"
+ "time"
+)
+
+// ClaimsValidator is an interface that can be implemented by custom claims that
+// wish to execute any additional claims validation based on
+// application-specific logic. The Validate function is then executed in
+// addition to the regular claims validation and any error returned is appended
+// to the final validation result.
+//
+// type MyCustomClaims struct {
+// Foo string `json:"foo"`
+// jwt.RegisteredClaims
+// }
+//
+// func (m MyCustomClaims) Validate() error {
+// if m.Foo != "bar" {
+// return errors.New("must be foobar")
+// }
+// return nil
+// }
+type ClaimsValidator interface {
+ Claims
+ Validate() error
+}
+
+// Validator is the core of the new Validation API. It is automatically used by
+// a [Parser] during parsing and can be modified with various parser options.
+//
+// The [NewValidator] function should be used to create an instance of this
+// struct.
+type Validator struct {
+ // leeway is an optional leeway that can be provided to account for clock skew.
+ leeway time.Duration
+
+ // timeFunc is used to supply the current time that is needed for
+ // validation. If unspecified, this defaults to time.Now.
+ timeFunc func() time.Time
+
+ // requireExp specifies whether the exp claim is required
+ requireExp bool
+
+ // verifyIat specifies whether the iat (Issued At) claim will be verified.
+ // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+ // only specifies the age of the token, but no validation check is
+ // necessary. However, if wanted, it can be checked if the iat is
+ // unrealistic, i.e., in the future.
+ verifyIat bool
+
+ // expectedAud contains the audience this token expects. Supplying an empty
+ // slice will disable aud checking.
+ expectedAud []string
+
+ // expectAllAud specifies whether all expected audiences must be present in
+ // the token. If false, only one of the expected audiences must be present.
+ expectAllAud bool
+
+ // expectedIss contains the issuer this token expects. Supplying an empty
+ // string will disable iss checking.
+ expectedIss string
+
+ // expectedSub contains the subject this token expects. Supplying an empty
+ // string will disable sub checking.
+ expectedSub string
+}
+
+// NewValidator can be used to create a stand-alone validator with the supplied
+// options. This validator can then be used to validate already parsed claims.
+//
+// Note: Under normal circumstances, explicitly creating a validator is not
+// needed and can potentially be dangerous; instead functions of the [Parser]
+// class should be used.
+//
+// The [Validator] is only checking the *validity* of the claims, such as its
+// expiration time, but it does NOT perform *signature verification* of the
+// token.
+func NewValidator(opts ...ParserOption) *Validator {
+ p := NewParser(opts...)
+ return p.validator
+}
+
+// Validate validates the given claims. It will also perform any custom
+// validation if claims implements the [ClaimsValidator] interface.
+//
+// Note: It will NOT perform any *signature verification* on the token that
+// contains the claims and expects that the [Claim] was already successfully
+// verified.
+func (v *Validator) Validate(claims Claims) error {
+ var (
+ now time.Time
+ errs = make([]error, 0, 6)
+ err error
+ )
+
+ // Check, if we have a time func
+ if v.timeFunc != nil {
+ now = v.timeFunc()
+ } else {
+ now = time.Now()
+ }
+
+ // We always need to check the expiration time, but usage of the claim
+ // itself is OPTIONAL by default. requireExp overrides this behavior
+ // and makes the exp claim mandatory.
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
+ errs = append(errs, err)
+ }
+
+ // We always need to check not-before, but usage of the claim itself is
+ // OPTIONAL.
+ if err = v.verifyNotBefore(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Check issued-at if the option is enabled
+ if v.verifyIat {
+ if err = v.verifyIssuedAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected audience, we also require the audience claim
+ if len(v.expectedAud) > 0 {
+ if err = v.verifyAudience(claims, v.expectedAud, v.expectAllAud); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected issuer, we also require the issuer claim
+ if v.expectedIss != "" {
+ if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected subject, we also require the subject claim
+ if v.expectedSub != "" {
+ if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, we want to give the claim itself some possibility to do some
+ // additional custom validation based on a custom Validate function.
+ cvt, ok := claims.(ClaimsValidator)
+ if ok {
+ if err := cvt.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ return joinErrors(errs...)
+}
+
+// verifyExpiresAt compares the exp claim in claims against cmp. This function
+// will succeed if cmp < exp. Additional leeway is taken into account.
+//
+// If exp is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+ exp, err := claims.GetExpirationTime()
+ if err != nil {
+ return err
+ }
+
+ if exp == nil {
+ return errorIfRequired(required, "exp")
+ }
+
+ return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+}
+
+// verifyIssuedAt compares the iat claim in claims against cmp. This function
+// will succeed if cmp >= iat. Additional leeway is taken into account.
+//
+// If iat is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+ iat, err := claims.GetIssuedAt()
+ if err != nil {
+ return err
+ }
+
+ if iat == nil {
+ return errorIfRequired(required, "iat")
+ }
+
+ return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+}
+
+// verifyNotBefore compares the nbf claim in claims against cmp. This function
+// will return true if cmp >= nbf. Additional leeway is taken into account.
+//
+// If nbf is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+ nbf, err := claims.GetNotBefore()
+ if err != nil {
+ return err
+ }
+
+ if nbf == nil {
+ return errorIfRequired(required, "nbf")
+ }
+
+ return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+}
+
+// verifyAudience compares the aud claim against cmp.
+//
+// If aud is not set or an empty list, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyAudience(claims Claims, cmp []string, expectAllAud bool) error {
+ aud, err := claims.GetAudience()
+ if err != nil {
+ return err
+ }
+
+ // Check that aud exists and is not empty. We only require the aud claim
+ // if we expect at least one audience to be present.
+ if len(aud) == 0 || len(aud) == 1 && aud[0] == "" {
+ required := len(v.expectedAud) > 0
+ return errorIfRequired(required, "aud")
+ }
+
+ if !expectAllAud {
+ for _, a := range aud {
+ // If we only expect one match, we can stop early if we find a match
+ if slices.Contains(cmp, a) {
+ return nil
+ }
+ }
+
+ return ErrTokenInvalidAudience
+ }
+
+ // Note that we are looping cmp here to ensure that all expected audiences
+ // are present in the aud claim.
+ for _, a := range cmp {
+ if !slices.Contains(aud, a) {
+ return ErrTokenInvalidAudience
+ }
+ }
+
+ return nil
+}
+
+// verifyIssuer compares the iss claim in claims against cmp.
+//
+// If iss is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+ iss, err := claims.GetIssuer()
+ if err != nil {
+ return err
+ }
+
+ if iss == "" {
+ return errorIfRequired(required, "iss")
+ }
+
+ return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+}
+
+// verifySubject compares the sub claim against cmp.
+//
+// If sub is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when its
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
+ sub, err := claims.GetSubject()
+ if err != nil {
+ return err
+ }
+
+ if sub == "" {
+ return errorIfRequired(required, "sub")
+ }
+
+ return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+}
+
+// errorIfFalse returns the error specified in err, if the value is true.
+// Otherwise, nil is returned.
+func errorIfFalse(value bool, err error) error {
+ if value {
+ return nil
+ } else {
+ return err
+ }
+}
+
+// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+// true. Otherwise, nil is returned.
+func errorIfRequired(required bool, claim string) error {
+ if required {
+ return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
deleted file mode 100644
index 15167cd7..00000000
--- a/vendor/github.com/golang/protobuf/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9..00000000
--- a/vendor/github.com/golang/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
deleted file mode 100644
index 0f646931..00000000
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2010 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
deleted file mode 100644
index e810e6fe..00000000
--- a/vendor/github.com/golang/protobuf/proto/buffer.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- WireVarint = 0
- WireFixed32 = 5
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
-)
-
-// EncodeVarint returns the varint encoded bytes of v.
-func EncodeVarint(v uint64) []byte {
- return protowire.AppendVarint(nil, v)
-}
-
-// SizeVarint returns the length of the varint encoded bytes of v.
-// This is equal to len(EncodeVarint(v)).
-func SizeVarint(v uint64) int {
- return protowire.SizeVarint(v)
-}
-
-// DecodeVarint parses a varint encoded integer from b,
-// returning the integer value and the length of the varint.
-// It returns (0, 0) if there is a parse error.
-func DecodeVarint(b []byte) (uint64, int) {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return 0, 0
- }
- return v, n
-}
-
-// Buffer is a buffer for encoding and decoding the protobuf wire format.
-// It may be reused between invocations to reduce memory usage.
-type Buffer struct {
- buf []byte
- idx int
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer initialized with buf,
-// where the contents of buf are considered the unread portion of the buffer.
-func NewBuffer(buf []byte) *Buffer {
- return &Buffer{buf: buf}
-}
-
-// SetDeterministic specifies whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (b *Buffer) SetDeterministic(deterministic bool) {
- b.deterministic = deterministic
-}
-
-// SetBuf sets buf as the internal buffer,
-// where the contents of buf are considered the unread portion of the buffer.
-func (b *Buffer) SetBuf(buf []byte) {
- b.buf = buf
- b.idx = 0
-}
-
-// Reset clears the internal buffer of all written and unread data.
-func (b *Buffer) Reset() {
- b.buf = b.buf[:0]
- b.idx = 0
-}
-
-// Bytes returns the internal buffer.
-func (b *Buffer) Bytes() []byte {
- return b.buf
-}
-
-// Unread returns the unread portion of the buffer.
-func (b *Buffer) Unread() []byte {
- return b.buf[b.idx:]
-}
-
-// Marshal appends the wire-format encoding of m to the buffer.
-func (b *Buffer) Marshal(m Message) error {
- var err error
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// Unmarshal parses the wire-format message in the buffer and
-// places the decoded results in m.
-// It does not reset m before unmarshaling.
-func (b *Buffer) Unmarshal(m Message) error {
- err := UnmarshalMerge(b.Unread(), m)
- b.idx = len(b.buf)
- return err
-}
-
-type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
-
-func (m *unknownFields) String() string { panic("not implemented") }
-func (m *unknownFields) Reset() { panic("not implemented") }
-func (m *unknownFields) ProtoMessage() { panic("not implemented") }
-
-// DebugPrint dumps the encoded bytes of b with a header and footer including s
-// to stdout. This is only intended for debugging.
-func (*Buffer) DebugPrint(s string, b []byte) {
- m := MessageReflect(new(unknownFields))
- m.SetUnknown(b)
- b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
- fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
-}
-
-// EncodeVarint appends an unsigned varint encoding to the buffer.
-func (b *Buffer) EncodeVarint(v uint64) error {
- b.buf = protowire.AppendVarint(b.buf, v)
- return nil
-}
-
-// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag32(v uint64) error {
- return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-}
-
-// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag64(v uint64) error {
- return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
-}
-
-// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed32(v uint64) error {
- b.buf = protowire.AppendFixed32(b.buf, uint32(v))
- return nil
-}
-
-// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed64(v uint64) error {
- b.buf = protowire.AppendFixed64(b.buf, uint64(v))
- return nil
-}
-
-// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
-func (b *Buffer) EncodeRawBytes(v []byte) error {
- b.buf = protowire.AppendBytes(b.buf, v)
- return nil
-}
-
-// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
-// It does not validate whether v contains valid UTF-8.
-func (b *Buffer) EncodeStringBytes(v string) error {
- b.buf = protowire.AppendString(b.buf, v)
- return nil
-}
-
-// EncodeMessage appends a length-prefixed encoded message to the buffer.
-func (b *Buffer) EncodeMessage(m Message) error {
- var err error
- b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// DecodeVarint consumes an encoded unsigned varint from the buffer.
-func (b *Buffer) DecodeVarint() (uint64, error) {
- v, n := protowire.ConsumeVarint(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag32() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
-}
-
-// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag64() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
-}
-
-// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed32() (uint64, error) {
- v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed64() (uint64, error) {
- v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
-// If alloc is specified, it returns a copy the raw bytes
-// rather than a sub-slice of the buffer.
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
- v, n := protowire.ConsumeBytes(b.buf[b.idx:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- b.idx += n
- if alloc {
- v = append([]byte(nil), v...)
- }
- return v, nil
-}
-
-// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
-// It does not validate whether the raw bytes contain valid UTF-8.
-func (b *Buffer) DecodeStringBytes() (string, error) {
- v, n := protowire.ConsumeString(b.buf[b.idx:])
- if n < 0 {
- return "", protowire.ParseError(n)
- }
- b.idx += n
- return v, nil
-}
-
-// DecodeMessage consumes a length-prefixed message from the buffer.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeMessage(m Message) error {
- v, err := b.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return UnmarshalMerge(v, m)
-}
-
-// DecodeGroup consumes a message group from the buffer.
-// It assumes that the start group marker has already been consumed and
-// consumes all bytes until (and including the end group marker).
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeGroup(m Message) error {
- v, n, err := consumeGroup(b.buf[b.idx:])
- if err != nil {
- return err
- }
- b.idx += n
- return UnmarshalMerge(v, m)
-}
-
-// consumeGroup parses b until it finds an end group marker, returning
-// the raw bytes of the message (excluding the end group marker) and the
-// the total length of the message (including the end group marker).
-func consumeGroup(b []byte) ([]byte, int, error) {
- b0 := b
- depth := 1 // assume this follows a start group marker
- for {
- _, wtyp, tagLen := protowire.ConsumeTag(b)
- if tagLen < 0 {
- return nil, 0, protowire.ParseError(tagLen)
- }
- b = b[tagLen:]
-
- var valLen int
- switch wtyp {
- case protowire.VarintType:
- _, valLen = protowire.ConsumeVarint(b)
- case protowire.Fixed32Type:
- _, valLen = protowire.ConsumeFixed32(b)
- case protowire.Fixed64Type:
- _, valLen = protowire.ConsumeFixed64(b)
- case protowire.BytesType:
- _, valLen = protowire.ConsumeBytes(b)
- case protowire.StartGroupType:
- depth++
- case protowire.EndGroupType:
- depth--
- default:
- return nil, 0, errors.New("proto: cannot parse reserved wire type")
- }
- if valLen < 0 {
- return nil, 0, protowire.ParseError(valLen)
- }
- b = b[valLen:]
-
- if depth == 0 {
- return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
- }
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
deleted file mode 100644
index d399bf06..00000000
--- a/vendor/github.com/golang/protobuf/proto/defaults.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// SetDefaults sets unpopulated scalar fields to their default values.
-// Fields within a oneof are not set even if they have a default value.
-// SetDefaults is recursively called upon any populated message fields.
-func SetDefaults(m Message) {
- if m != nil {
- setDefaults(MessageReflect(m))
- }
-}
-
-func setDefaults(m protoreflect.Message) {
- fds := m.Descriptor().Fields()
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if !m.Has(fd) {
- if fd.HasDefault() && fd.ContainingOneof() == nil {
- v := fd.Default()
- if fd.Kind() == protoreflect.BytesKind {
- v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
- }
- m.Set(fd, v)
- }
- continue
- }
- }
-
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- setDefaults(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- setDefaults(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- setDefaults(v.Message())
- return true
- })
- }
- }
- return true
- })
-}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index e8db57e0..00000000
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
-
- protoV2 "google.golang.org/protobuf/proto"
-)
-
-var (
- // Deprecated: No longer returned.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // Deprecated: No longer returned.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-
- // Deprecated: No longer returned.
- ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-)
-
-// Deprecated: Do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: Do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: Do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func RegisterMessageSetType(Message, int32, string) {}
-
-// Deprecated: Do not use.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// Deprecated: Do not use.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// Deprecated: Do not use; this type existed for intenal-use only.
-type InternalMessageInfo struct{}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) DiscardUnknown(m Message) {
- DiscardUnknown(m)
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
- return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Size(m Message) int {
- return protoV2.Size(MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
- return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
deleted file mode 100644
index 2187e877..00000000
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-func DiscardUnknown(m Message) {
- if m != nil {
- discardUnknown(MessageReflect(m))
- }
-}
-
-func discardUnknown(m protoreflect.Message) {
- m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- discardUnknown(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- discardUnknown(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- discardUnknown(v.Message())
- return true
- })
- }
- }
- return true
- })
-
- // Discard unknown fields.
- if len(m.GetUnknown()) > 0 {
- m.SetUnknown(nil)
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 42fc120c..00000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-type (
- // ExtensionDesc represents an extension descriptor and
- // is used to interact with an extension field in a message.
- //
- // Variables of this type are generated in code by protoc-gen-go.
- ExtensionDesc = protoimpl.ExtensionInfo
-
- // ExtensionRange represents a range of message extensions.
- // Used in code generated by protoc-gen-go.
- ExtensionRange = protoiface.ExtensionRangeV1
-
- // Deprecated: Do not use; this is an internal type.
- Extension = protoimpl.ExtensionFieldV1
-
- // Deprecated: Do not use; this is an internal type.
- XXX_InternalExtensions = protoimpl.ExtensionFields
-)
-
-// ErrMissingExtension reports whether the extension was not present.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-// HasExtension reports whether the extension field is present in m
-// either as an explicitly populated field or as an unknown field.
-func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return false
- }
-
- // Check whether any populated known field matches the field number.
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- has = mr.Has(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- has = int32(fd.Number()) == xt.Field
- return !has
- })
- }
-
- // Check whether any unknown field matches the field number.
- for b := mr.GetUnknown(); !has && len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- has = int32(num) == xt.Field
- b = b[n:]
- }
- return has
-}
-
-// ClearExtension removes the extension field from m
-// either as an explicitly populated field or as an unknown field.
-func ClearExtension(m Message, xt *ExtensionDesc) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- mr.Clear(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if int32(fd.Number()) == xt.Field {
- mr.Clear(fd)
- return false
- }
- return true
- })
- }
- clearUnknown(mr, fieldNum(xt.Field))
-}
-
-// ClearAllExtensions clears all extensions from m.
-// This includes populated fields and unknown fields in the extension range.
-func ClearAllExtensions(m Message) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if fd.IsExtension() {
- mr.Clear(fd)
- }
- return true
- })
- clearUnknown(mr, mr.Descriptor().ExtensionRanges())
-}
-
-// GetExtension retrieves a proto2 extended field from m.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes for the extension field.
-func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Retrieve the unknown fields for this extension field.
- var bo protoreflect.RawFields
- for bi := mr.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if int32(num) == xt.Field {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
-
- // For type incomplete descriptors, only retrieve the unknown fields.
- if xt.ExtensionType == nil {
- return []byte(bo), nil
- }
-
- // If the extension field only exists as unknown fields, unmarshal it.
- // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- if !mr.Has(xtd) && len(bo) > 0 {
- m2 := mr.New()
- if err := (proto.UnmarshalOptions{
- Resolver: extensionResolver{xt},
- }.Unmarshal(bo, m2.Interface())); err != nil {
- return nil, err
- }
- if m2.Has(xtd) {
- mr.Set(xtd, m2.Get(xtd))
- clearUnknown(mr, fieldNum(xt.Field))
- }
- }
-
- // Check whether the message has the extension field set or a default.
- var pv protoreflect.Value
- switch {
- case mr.Has(xtd):
- pv = mr.Get(xtd)
- case xtd.HasDefault():
- pv = xtd.Default()
- default:
- return nil, ErrMissingExtension
- }
-
- v := xt.InterfaceOf(pv)
- rv := reflect.ValueOf(v)
- if isScalarKind(rv.Kind()) {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- }
- return v, nil
-}
-
-// extensionResolver is a custom extension resolver that stores a single
-// extension type that takes precedence over the global registry.
-type extensionResolver struct{ xt protoreflect.ExtensionType }
-
-func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-// GetExtensions returns a list of the extensions values present in m,
-// corresponding with the provided list of extension descriptors, xts.
-// If an extension is missing in m, the corresponding value is nil.
-func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return nil, errNotExtendable
- }
-
- vs := make([]interface{}, len(xts))
- for i, xt := range xts {
- v, err := GetExtension(m, xt)
- if err != nil {
- if err == ErrMissingExtension {
- continue
- }
- return vs, err
- }
- vs[i] = v
- }
- return vs, nil
-}
-
-// SetExtension sets an extension field in m to the provided value.
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return errNotExtendable
- }
-
- rv := reflect.ValueOf(v)
- if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
- }
- if rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
- }
- if isScalarKind(rv.Elem().Kind()) {
- v = rv.Elem().Interface()
- }
- }
-
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- mr.Set(xtd, xt.ValueOf(v))
- clearUnknown(mr, fieldNum(xt.Field))
- return nil
-}
-
-// SetRawExtension inserts b into the unknown fields of m.
-//
-// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
-func SetRawExtension(m Message, fnum int32, b []byte) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- // Verify that the raw field is valid.
- for b0 := b; len(b0) > 0; {
- num, _, n := protowire.ConsumeField(b0)
- if int32(num) != fnum {
- panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
- }
- b0 = b0[n:]
- }
-
- ClearExtension(m, &ExtensionDesc{Field: fnum})
- mr.SetUnknown(append(mr.GetUnknown(), b...))
-}
-
-// ExtensionDescs returns a list of extension descriptors found in m,
-// containing descriptors for both populated extension fields in m and
-// also unknown fields of m that are in the extension range.
-// For the later case, an type incomplete descriptor is provided where only
-// the ExtensionDesc.Field field is populated.
-// The order of the extension descriptors is undefined.
-func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Collect a set of known extension descriptors.
- extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
- mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- xt := fd.(protoreflect.ExtensionTypeDescriptor)
- if xd, ok := xt.Type().(*ExtensionDesc); ok {
- extDescs[fd.Number()] = xd
- }
- }
- return true
- })
-
- // Collect a set of unknown extension descriptors.
- extRanges := mr.Descriptor().ExtensionRanges()
- for b := mr.GetUnknown(); len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- if extRanges.Has(num) && extDescs[num] == nil {
- extDescs[num] = nil
- }
- b = b[n:]
- }
-
- // Transpose the set of descriptors into a list.
- var xts []*ExtensionDesc
- for num, xt := range extDescs {
- if xt == nil {
- xt = &ExtensionDesc{Field: int32(num)}
- }
- xts = append(xts, xt)
- }
- return xts, nil
-}
-
-// isValidExtension reports whether xtd is a valid extension descriptor for md.
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
- return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
-}
-
-// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
-// This function exists for historical reasons since the representation of
-// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
-func isScalarKind(k reflect.Kind) bool {
- switch k {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- return true
- default:
- return false
- }
-}
-
-// clearUnknown removes unknown fields from m where remover.Has reports true.
-func clearUnknown(m protoreflect.Message, remover interface {
- Has(protoreflect.FieldNumber) bool
-}) {
- var bo protoreflect.RawFields
- for bi := m.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if !remover.Has(num) {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
- if bi := m.GetUnknown(); len(bi) != len(bo) {
- m.SetUnknown(bo)
- }
-}
-
-type fieldNum protoreflect.FieldNumber
-
-func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
- return protoreflect.FieldNumber(n1) == n2
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index dcdc2202..00000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// StructProperties represents protocol buffer type information for a
-// generated protobuf message in the open-struct API.
-//
-// Deprecated: Do not use.
-type StructProperties struct {
- // Prop are the properties for each field.
- //
- // Fields belonging to a oneof are stored in OneofTypes instead, with a
- // single Properties representing the parent oneof held here.
- //
- // The order of Prop matches the order of fields in the Go struct.
- // Struct fields that are not related to protobufs have a "XXX_" prefix
- // in the Properties.Name and must be ignored by the user.
- Prop []*Properties
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the protobuf field name.
- OneofTypes map[string]*OneofProperties
-}
-
-// Properties represents the type information for a protobuf message field.
-//
-// Deprecated: Do not use.
-type Properties struct {
- // Name is a placeholder name with little meaningful semantic value.
- // If the name has an "XXX_" prefix, the entire Properties must be ignored.
- Name string
- // OrigName is the protobuf field name or oneof name.
- OrigName string
- // JSONName is the JSON name for the protobuf field.
- JSONName string
- // Enum is a placeholder name for enums.
- // For historical reasons, this is neither the Go name for the enum,
- // nor the protobuf name for the enum.
- Enum string // Deprecated: Do not use.
- // Weak contains the full name of the weakly referenced message.
- Weak string
- // Wire is a string representation of the wire type.
- Wire string
- // WireType is the protobuf wire type for the field.
- WireType int
- // Tag is the protobuf field number.
- Tag int
- // Required reports whether this is a required field.
- Required bool
- // Optional reports whether this is a optional field.
- Optional bool
- // Repeated reports whether this is a repeated field.
- Repeated bool
- // Packed reports whether this is a packed repeated field of scalars.
- Packed bool
- // Proto3 reports whether this field operates under the proto3 syntax.
- Proto3 bool
- // Oneof reports whether this field belongs within a oneof.
- Oneof bool
-
- // Default is the default value in string form.
- Default string
- // HasDefault reports whether the field has a default value.
- HasDefault bool
-
- // MapKeyProp is the properties for the key field for a map field.
- MapKeyProp *Properties
- // MapValProp is the properties for the value field for a map field.
- MapValProp *Properties
-}
-
-// OneofProperties represents the type information for a protobuf oneof.
-//
-// Deprecated: Do not use.
-type OneofProperties struct {
- // Type is a pointer to the generated wrapper type for the field value.
- // This is nil for messages that are not in the open-struct API.
- Type reflect.Type
- // Field is the index into StructProperties.Prop for the containing oneof.
- Field int
- // Prop is the properties for the field.
- Prop *Properties
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += "," + strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != "" {
- s += ",json=" + p.JSONName
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if len(p.Weak) > 0 {
- s += ",weak=" + p.Weak
- }
- if p.Proto3 {
- s += ",proto3"
- }
- if p.Oneof {
- s += ",oneof"
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(tag string) {
- // For example: "bytes,49,opt,name=foo,def=hello!"
- for len(tag) > 0 {
- i := strings.IndexByte(tag, ',')
- if i < 0 {
- i = len(tag)
- }
- switch s := tag[:i]; {
- case strings.HasPrefix(s, "name="):
- p.OrigName = s[len("name="):]
- case strings.HasPrefix(s, "json="):
- p.JSONName = s[len("json="):]
- case strings.HasPrefix(s, "enum="):
- p.Enum = s[len("enum="):]
- case strings.HasPrefix(s, "weak="):
- p.Weak = s[len("weak="):]
- case strings.Trim(s, "0123456789") == "":
- n, _ := strconv.ParseUint(s, 10, 32)
- p.Tag = int(n)
- case s == "opt":
- p.Optional = true
- case s == "req":
- p.Required = true
- case s == "rep":
- p.Repeated = true
- case s == "varint" || s == "zigzag32" || s == "zigzag64":
- p.Wire = s
- p.WireType = WireVarint
- case s == "fixed32":
- p.Wire = s
- p.WireType = WireFixed32
- case s == "fixed64":
- p.Wire = s
- p.WireType = WireFixed64
- case s == "bytes":
- p.Wire = s
- p.WireType = WireBytes
- case s == "group":
- p.Wire = s
- p.WireType = WireStartGroup
- case s == "packed":
- p.Packed = true
- case s == "proto3":
- p.Proto3 = true
- case s == "oneof":
- p.Oneof = true
- case strings.HasPrefix(s, "def="):
- // The default tag is special in that everything afterwards is the
- // default regardless of the presence of commas.
- p.HasDefault = true
- p.Default, i = tag[len("def="):], len(tag)
- }
- tag = strings.TrimPrefix(tag[i:], ",")
- }
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-//
-// Deprecated: Do not use.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
-
- if typ != nil && typ.Kind() == reflect.Map {
- p.MapKeyProp = new(Properties)
- p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
- p.MapValProp = new(Properties)
- p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
- }
-}
-
-var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-
-// GetProperties returns the list of properties for the type represented by t,
-// which must be a generated protocol buffer message in the open-struct API,
-// where protobuf message fields are represented by exported Go struct fields.
-//
-// Deprecated: Use protobuf reflection instead.
-func GetProperties(t reflect.Type) *StructProperties {
- if p, ok := propertiesCache.Load(t); ok {
- return p.(*StructProperties)
- }
- p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
- return p.(*StructProperties)
-}
-
-func newProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
-
- var hasOneof bool
- prop := new(StructProperties)
-
- // Construct a list of properties for each field in the struct.
- for i := 0; i < t.NumField(); i++ {
- p := new(Properties)
- f := t.Field(i)
- tagField := f.Tag.Get("protobuf")
- p.Init(f.Type, f.Name, tagField, &f)
-
- tagOneof := f.Tag.Get("protobuf_oneof")
- if tagOneof != "" {
- hasOneof = true
- p.OrigName = tagOneof
- }
-
- // Rename unrelated struct fields with the "XXX_" prefix since so much
- // user code simply checks for this to exclude special fields.
- if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
- p.Name = "XXX_" + p.Name
- p.OrigName = "XXX_" + p.OrigName
- } else if p.Weak != "" {
- p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
- }
-
- prop.Prop = append(prop.Prop, p)
- }
-
- // Construct a mapping of oneof field names to properties.
- if hasOneof {
- var oneofWrappers []interface{}
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
- }
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
- }
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
- if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
- oneofWrappers = m.ProtoMessageInfo().OneofWrappers
- }
- }
-
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, wrapper := range oneofWrappers {
- p := &OneofProperties{
- Type: reflect.ValueOf(wrapper).Type(), // *T
- Prop: new(Properties),
- }
- f := p.Type.Elem().Field(0)
- p.Prop.Name = f.Name
- p.Prop.Parse(f.Tag.Get("protobuf"))
-
- // Determine the struct field that contains this oneof.
- // Each wrapper is assignable to exactly one parent field.
- var foundOneof bool
- for i := 0; i < t.NumField() && !foundOneof; i++ {
- if p.Type.AssignableTo(t.Field(i).Type) {
- p.Field = i
- foundOneof = true
- }
- }
- if !foundOneof {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
- prop.OneofTypes[p.Prop.OrigName] = p
- }
- }
-
- return prop
-}
-
-func (sp *StructProperties) Len() int { return len(sp.Prop) }
-func (sp *StructProperties) Less(i, j int) bool { return false }
-func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
deleted file mode 100644
index 5aee89c3..00000000
--- a/vendor/github.com/golang/protobuf/proto/proto.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proto provides functionality for handling protocol buffer messages.
-// In particular, it provides marshaling and unmarshaling between a protobuf
-// message and the binary wire format.
-//
-// See https://developers.google.com/protocol-buffers/docs/gotutorial for
-// more information.
-//
-// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- ProtoPackageIsVersion1 = true
- ProtoPackageIsVersion2 = true
- ProtoPackageIsVersion3 = true
- ProtoPackageIsVersion4 = true
-)
-
-// GeneratedEnum is any enum type generated by protoc-gen-go
-// which is a named int32 kind.
-// This type exists for documentation purposes.
-type GeneratedEnum interface{}
-
-// GeneratedMessage is any message type generated by protoc-gen-go
-// which is a pointer to a named struct kind.
-// This type exists for documentation purposes.
-type GeneratedMessage interface{}
-
-// Message is a protocol buffer message.
-//
-// This is the v1 version of the message interface and is marginally better
-// than an empty interface as it lacks any method to programatically interact
-// with the contents of the message.
-//
-// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
-// exposes protobuf reflection as a first-class feature of the interface.
-//
-// To convert a v1 message to a v2 message, use the MessageV2 function.
-// To convert a v2 message to a v1 message, use the MessageV1 function.
-type Message = protoiface.MessageV1
-
-// MessageV1 converts either a v1 or v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
- return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2 converts either a v1 or v2 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2(m GeneratedMessage) protoV2.Message {
- return protoimpl.X.ProtoMessageV2Of(m)
-}
-
-// MessageReflect returns a reflective view for a message.
-// It returns nil if m is nil.
-func MessageReflect(m Message) protoreflect.Message {
- return protoimpl.X.MessageOf(m)
-}
-
-// Marshaler is implemented by messages that can marshal themselves.
-// This interface is used by the following functions: Size, Marshal,
-// Buffer.Marshal, and Buffer.EncodeMessage.
-//
-// Deprecated: Do not implement.
-type Marshaler interface {
- // Marshal formats the encoded bytes of the message.
- // It should be deterministic and emit valid protobuf wire data.
- // The caller takes ownership of the returned buffer.
- Marshal() ([]byte, error)
-}
-
-// Unmarshaler is implemented by messages that can unmarshal themselves.
-// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
-// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
-//
-// Deprecated: Do not implement.
-type Unmarshaler interface {
- // Unmarshal parses the encoded bytes of the protobuf wire input.
- // The provided buffer is only valid for during method call.
- // It should not reset the receiver message.
- Unmarshal([]byte) error
-}
-
-// Merger is implemented by messages that can merge themselves.
-// This interface is used by the following functions: Clone and Merge.
-//
-// Deprecated: Do not implement.
-type Merger interface {
- // Merge merges the contents of src into the receiver message.
- // It clones all data structures in src such that it aliases no mutable
- // memory referenced by src.
- Merge(src Message)
-}
-
-// RequiredNotSetError is an error type returned when
-// marshaling or unmarshaling a message with missing required fields.
-type RequiredNotSetError struct {
- err error
-}
-
-func (e *RequiredNotSetError) Error() string {
- if e.err != nil {
- return e.err.Error()
- }
- return "proto: required field not set"
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-func checkRequiredNotSet(m protoV2.Message) error {
- if err := protoV2.CheckInitialized(m); err != nil {
- return &RequiredNotSetError{err: err}
- }
- return nil
-}
-
-// Clone returns a deep copy of src.
-func Clone(src Message) Message {
- return MessageV1(protoV2.Clone(MessageV2(src)))
-}
-
-// Merge merges src into dst, which must be messages of the same type.
-//
-// Populated scalar fields in src are copied to dst, while populated
-// singular messages in src are merged into dst by recursively calling Merge.
-// The elements of every list field in src is appended to the corresponded
-// list fields in dst. The entries of every map field in src is copied into
-// the corresponding map field in dst, possibly replacing existing entries.
-// The unknown fields of src are appended to the unknown fields of dst.
-func Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
-//
-// Two messages are equal if they are the same protobuf message type,
-// have the same set of populated known and extension field values,
-// and the same set of unknown fields values.
-//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
-func Equal(x, y Message) bool {
- return protoV2.Equal(MessageV2(x), MessageV2(y))
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
- ms, ok := md.(interface{ IsMessageSet() bool })
- return ok && ms.IsMessageSet()
-}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
deleted file mode 100644
index 066b4323..00000000
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "reflect"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// filePath is the path to the proto source file.
-type filePath = string // e.g., "google/protobuf/descriptor.proto"
-
-// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
-type fileDescGZIP = []byte
-
-var fileCache sync.Map // map[filePath]fileDescGZIP
-
-// RegisterFile is called from generated code to register the compressed
-// FileDescriptorProto with the file path for a proto source file.
-//
-// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
-func RegisterFile(s filePath, d fileDescGZIP) {
- // Decompress the descriptor.
- zr, err := gzip.NewReader(bytes.NewReader(d))
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
- b, err := ioutil.ReadAll(zr)
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
-
- // Construct a protoreflect.FileDescriptor from the raw descriptor.
- // Note that DescBuilder.Build automatically registers the constructed
- // file descriptor with the v2 registry.
- protoimpl.DescBuilder{RawDescriptor: b}.Build()
-
- // Locally cache the raw descriptor form for the file.
- fileCache.Store(s, d)
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto given the file path
-// for a proto source file. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
-func FileDescriptor(s filePath) fileDescGZIP {
- if v, ok := fileCache.Load(s); ok {
- return v.(fileDescGZIP)
- }
-
- // Find the descriptor in the v2 registry.
- var b []byte
- if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
- b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
- }
-
- // Locally cache the raw descriptor form for the file.
- if len(b) > 0 {
- v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
- return v.(fileDescGZIP)
- }
- return nil
-}
-
-// enumName is the name of an enum. For historical reasons, the enum name is
-// neither the full Go name nor the full protobuf name of the enum.
-// The name is the dot-separated combination of just the proto package that the
-// enum is declared within followed by the Go type name of the generated enum.
-type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
-
-// enumsByName maps enum values by name to their numeric counterpart.
-type enumsByName = map[string]int32
-
-// enumsByNumber maps enum values by number to their name counterpart.
-type enumsByNumber = map[int32]string
-
-var enumCache sync.Map // map[enumName]enumsByName
-var numFilesCache sync.Map // map[protoreflect.FullName]int
-
-// RegisterEnum is called from the generated code to register the mapping of
-// enum value names to enum numbers for the enum identified by s.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
-func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
- if _, ok := enumCache.Load(s); ok {
- panic("proto: duplicate enum registered: " + s)
- }
- enumCache.Store(s, m)
-
- // This does not forward registration to the v2 registry since this API
- // lacks sufficient information to construct a complete v2 enum descriptor.
-}
-
-// EnumValueMap returns the mapping from enum value names to enum numbers for
-// the enum of the given name. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
-func EnumValueMap(s enumName) enumsByName {
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
-
- // Check whether the cache is stale. If the number of files in the current
- // package differs, then it means that some enums may have been recently
- // registered upstream that we do not know about.
- var protoPkg protoreflect.FullName
- if i := strings.LastIndexByte(s, '.'); i >= 0 {
- protoPkg = protoreflect.FullName(s[:i])
- }
- v, _ := numFilesCache.Load(protoPkg)
- numFiles, _ := v.(int)
- if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
- return nil // cache is up-to-date; was not found earlier
- }
-
- // Update the enum cache for all enums declared in the given proto package.
- numFiles = 0
- protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
- walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
- name := protoimpl.X.LegacyEnumName(ed)
- if _, ok := enumCache.Load(name); !ok {
- m := make(enumsByName)
- evs := ed.Values()
- for i := evs.Len() - 1; i >= 0; i-- {
- ev := evs.Get(i)
- m[string(ev.Name())] = int32(ev.Number())
- }
- enumCache.LoadOrStore(name, m)
- }
- })
- numFiles++
- return true
- })
- numFilesCache.Store(protoPkg, numFiles)
-
- // Check cache again for enum map.
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
- return nil
-}
-
-// walkEnums recursively walks all enums declared in d.
-func walkEnums(d interface {
- Enums() protoreflect.EnumDescriptors
- Messages() protoreflect.MessageDescriptors
-}, f func(protoreflect.EnumDescriptor)) {
- eds := d.Enums()
- for i := eds.Len() - 1; i >= 0; i-- {
- f(eds.Get(i))
- }
- mds := d.Messages()
- for i := mds.Len() - 1; i >= 0; i-- {
- walkEnums(mds.Get(i), f)
- }
-}
-
-// messageName is the full name of protobuf message.
-type messageName = string
-
-var messageTypeCache sync.Map // map[messageName]reflect.Type
-
-// RegisterType is called from generated code to register the message Go type
-// for a message of the given name.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
-func RegisterType(m Message, s messageName) {
- mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
- if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
- panic(err)
- }
- messageTypeCache.Store(s, reflect.TypeOf(m))
-}
-
-// RegisterMapType is called from generated code to register the Go map type
-// for a protobuf message representing a map entry.
-//
-// Deprecated: Do not use.
-func RegisterMapType(m interface{}, s messageName) {
- t := reflect.TypeOf(m)
- if t.Kind() != reflect.Map {
- panic(fmt.Sprintf("invalid map kind: %v", t))
- }
- if _, ok := messageTypeCache.Load(s); ok {
- panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
- }
- messageTypeCache.Store(s, t)
-}
-
-// MessageType returns the message type for a named message.
-// It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
-func MessageType(s messageName) reflect.Type {
- if v, ok := messageTypeCache.Load(s); ok {
- return v.(reflect.Type)
- }
-
- // Derive the message type from the v2 registry.
- var t reflect.Type
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
- t = messageGoType(mt)
- }
-
- // If we could not get a concrete type, it is possible that it is a
- // pseudo-message for a map entry.
- if t == nil {
- d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
- if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
- kt := goTypeForField(md.Fields().ByNumber(1))
- vt := goTypeForField(md.Fields().ByNumber(2))
- t = reflect.MapOf(kt, vt)
- }
- }
-
- // Locally cache the message type for the given name.
- if t != nil {
- v, _ := messageTypeCache.LoadOrStore(s, t)
- return v.(reflect.Type)
- }
- return nil
-}
-
-func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
- switch k := fd.Kind(); k {
- case protoreflect.EnumKind:
- if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
- return enumGoType(et)
- }
- return reflect.TypeOf(protoreflect.EnumNumber(0))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
- return messageGoType(mt)
- }
- return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
- default:
- return reflect.TypeOf(fd.Default().Interface())
- }
-}
-
-func enumGoType(et protoreflect.EnumType) reflect.Type {
- return reflect.TypeOf(et.New(0))
-}
-
-func messageGoType(mt protoreflect.MessageType) reflect.Type {
- return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
-}
-
-// MessageName returns the full protobuf name for the given message type.
-//
-// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
-func MessageName(m Message) messageName {
- if m == nil {
- return ""
- }
- if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
- return m.XXX_MessageName()
- }
- return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
-}
-
-// RegisterExtension is called from the generated code to register
-// the extension descriptor.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
-func RegisterExtension(d *ExtensionDesc) {
- if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
- panic(err)
- }
-}
-
-type extensionsByNumber = map[int32]*ExtensionDesc
-
-var extensionCache sync.Map // map[messageName]extensionsByNumber
-
-// RegisteredExtensions returns a map of the registered extensions for the
-// provided protobuf message, indexed by the extension field number.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
-func RegisteredExtensions(m Message) extensionsByNumber {
- // Check whether the cache is stale. If the number of extensions for
- // the given message differs, then it means that some extensions were
- // recently registered upstream that we do not know about.
- s := MessageName(m)
- v, _ := extensionCache.Load(s)
- xs, _ := v.(extensionsByNumber)
- if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
- return xs // cache is up-to-date
- }
-
- // Cache is stale, re-compute the extensions map.
- xs = make(extensionsByNumber)
- protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
- if xd, ok := xt.(*ExtensionDesc); ok {
- xs[int32(xt.TypeDescriptor().Number())] = xd
- } else {
- // TODO: This implies that the protoreflect.ExtensionType is a
- // custom type not generated by protoc-gen-go. We could try and
- // convert the type to an ExtensionDesc.
- }
- return true
- })
- extensionCache.Store(s, xs)
- return xs
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
deleted file mode 100644
index 47eb3e44..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_decode.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "google.golang.org/protobuf/encoding/prototext"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextUnmarshalV2 = false
-
-// ParseError is returned by UnmarshalText.
-type ParseError struct {
- Message string
-
- // Deprecated: Do not use.
- Line, Offset int
-}
-
-func (e *ParseError) Error() string {
- if wrapTextUnmarshalV2 {
- return e.Message
- }
- if e.Line == 1 {
- return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
- }
- return fmt.Sprintf("line %d: %v", e.Line, e.Message)
-}
-
-// UnmarshalText parses a proto text formatted string into m.
-func UnmarshalText(s string, m Message) error {
- if u, ok := m.(encoding.TextUnmarshaler); ok {
- return u.UnmarshalText([]byte(s))
- }
-
- m.Reset()
- mi := MessageV2(m)
-
- if wrapTextUnmarshalV2 {
- err := prototext.UnmarshalOptions{
- AllowPartial: true,
- }.Unmarshal([]byte(s), mi)
- if err != nil {
- return &ParseError{Message: err.Error()}
- }
- return checkRequiredNotSet(mi)
- } else {
- if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
- return err
- }
- return checkRequiredNotSet(mi)
- }
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
- md := m.Descriptor()
- fds := md.Fields()
-
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- seen := make(map[protoreflect.FieldNumber]bool)
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := protoreflect.Name(tok.value)
- fd := fds.ByName(name)
- switch {
- case fd == nil:
- gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
- fd = gd
- }
- case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
- fd = nil
- case fd.IsWeak() && fd.Message().IsPlaceholder():
- fd = nil
- }
- if fd == nil {
- typeName := string(md.FullName())
- if m, ok := m.Interface().(Message); ok {
- t := reflect.TypeOf(m)
- if t.Kind() == reflect.Ptr {
- typeName = t.Elem().String()
- }
- }
- return p.errorf("unknown field name %q in %v", name, typeName)
- }
- if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
- }
- if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
- return p.errorf("non-repeated field %q was repeated", fd.Name())
- }
- seen[fd.Number()] = true
-
- // Consume any colon.
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- // Parse into the field.
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- if v, err = p.unmarshalValue(v, fd); err != nil {
- return err
- }
- m.Set(fd, v)
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
- name, err := p.consumeExtensionOrAnyName()
- if err != nil {
- return err
- }
-
- // If it contains a slash, it's an Any type URL.
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
-
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
- if err != nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
- }
- m2 := mt.New()
- if err := p.unmarshalMessage(m2, terminator); err != nil {
- return err
- }
- b, err := protoV2.Marshal(m2.Interface())
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
- }
-
- urlFD := m.Descriptor().Fields().ByName("type_url")
- valFD := m.Descriptor().Fields().ByName("value")
- if seen[urlFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
- }
- if seen[valFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
- }
- m.Set(urlFD, protoreflect.ValueOfString(name))
- m.Set(valFD, protoreflect.ValueOfBytes(b))
- seen[urlFD.Number()] = true
- seen[valFD.Number()] = true
- return nil
- }
-
- xname := protoreflect.FullName(name)
- xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
- if xt == nil && isMessageSet(m.Descriptor()) {
- xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
- }
- if xt == nil {
- return p.errorf("unrecognized extension %q", name)
- }
- fd := xt.TypeDescriptor()
- if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
- return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
- }
-
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- v, err = p.unmarshalValue(v, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- return p.consumeOptionalSeparator()
-}
-
-func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch {
- case fd.IsList():
- lv := v.List()
- var err error
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
-
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return v, p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return v, nil
- }
-
- // One value of the repeated field.
- p.back()
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
- return v, nil
- case fd.IsMap():
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order.
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- keyFD := fd.MapKey()
- valFD := fd.MapValue()
-
- mv := v.Map()
- kv := keyFD.Default()
- vv := mv.NewValue()
- for {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == terminator {
- break
- }
- var err error
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return v, err
- }
- if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- case "value":
- if err := p.checkForColon(valFD); err != nil {
- return v, err
- }
- if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- default:
- p.back()
- return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
- mv.Set(kv.MapKey(), vv)
- return v, nil
- default:
- p.back()
- return p.unmarshalSingularValue(v, fd)
- }
-}
-
-func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch fd.Kind() {
- case protoreflect.BoolKind:
- switch tok.value {
- case "true", "1", "t", "True":
- return protoreflect.ValueOfBool(true), nil
- case "false", "0", "f", "False":
- return protoreflect.ValueOfBool(false), nil
- }
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfUint32(uint32(x)), nil
- }
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfUint64(uint64(x)), nil
- }
- case protoreflect.FloatKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 32); err == nil {
- return protoreflect.ValueOfFloat32(float32(x)), nil
- }
- case protoreflect.DoubleKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 64); err == nil {
- return protoreflect.ValueOfFloat64(float64(x)), nil
- }
- case protoreflect.StringKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfString(tok.unquoted), nil
- }
- case protoreflect.BytesKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
- }
- case protoreflect.EnumKind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
- }
- vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
- if vd != nil {
- return protoreflect.ValueOfEnum(vd.Number()), nil
- }
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
- err := p.unmarshalMessage(v.Message(), terminator)
- return v, err
- default:
- panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
- }
- return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- if fd.Message() == nil {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
-// the following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtensionOrAnyName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in unmarshalMessage to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-var errBadUTF8 = errors.New("proto: bad UTF-8")
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(rune(i)), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
deleted file mode 100644
index a31134ee..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_encode.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "encoding"
- "fmt"
- "io"
- "math"
- "sort"
- "strings"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextMarshalV2 = false
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line)
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes the proto text format of m to w.
-func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
- b, err := tm.marshal(m)
- if len(b) > 0 {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return err
-}
-
-// Text returns a proto text formatted string of m.
-func (tm *TextMarshaler) Text(m Message) string {
- b, _ := tm.marshal(m)
- return string(b)
-}
-
-func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return []byte(""), nil
- }
-
- if wrapTextMarshalV2 {
- if m, ok := m.(encoding.TextMarshaler); ok {
- return m.MarshalText()
- }
-
- opts := prototext.MarshalOptions{
- AllowPartial: true,
- EmitUnknown: true,
- }
- if !tm.Compact {
- opts.Indent = " "
- }
- if !tm.ExpandAny {
- opts.Resolver = (*protoregistry.Types)(nil)
- }
- return opts.Marshal(mr.Interface())
- } else {
- w := &textWriter{
- compact: tm.Compact,
- expandAny: tm.ExpandAny,
- complete: true,
- }
-
- if m, ok := m.(encoding.TextMarshaler); ok {
- b, err := m.MarshalText()
- if err != nil {
- return nil, err
- }
- w.Write(b)
- return w.buf, nil
- }
-
- err := w.writeMessage(mr)
- return w.buf, err
- }
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// MarshalText writes the proto text format of m to w.
-func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
-
-// MarshalTextString returns a proto text formatted string of m.
-func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
-
-// CompactText writes the compact proto text format of m to w.
-func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
-
-// CompactTextString returns a compact proto text formatted string of m.
-func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
-
-var (
- newline = []byte("\n")
- endBraceNewline = []byte("}\n")
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- compact bool // same as TextMarshaler.Compact
- expandAny bool // same as TextMarshaler.ExpandAny
- complete bool // whether the current position is a complete line
- indent int // indentation level; never negative
- buf []byte
-}
-
-func (w *textWriter) Write(p []byte) (n int, _ error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, p...)
- w.complete = false
- return len(p), nil
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- w.buf = append(w.buf, ' ')
- n++
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- if i+1 < len(frags) {
- w.buf = append(w.buf, '\n')
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, c)
- w.complete = c == '\n'
- return nil
-}
-
-func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
-
- if fd.Kind() != protoreflect.GroupKind {
- w.buf = append(w.buf, fd.Name()...)
- w.WriteByte(':')
- } else {
- // Use message type name for group field name.
- w.buf = append(w.buf, fd.Message().Name()...)
- }
-
- if !w.compact {
- w.WriteByte(' ')
- }
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
- md := m.Descriptor()
- fdURL := md.Fields().ByName("type_url")
- fdVal := md.Fields().ByName("value")
-
- url := m.Get(fdURL).String()
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
- if err != nil {
- return false, nil
- }
-
- b := m.Get(fdVal).Bytes()
- m2 := mt.New()
- if err := proto.Unmarshal(b, m2.Interface()); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- if requiresQuotes(url) {
- w.writeQuotedString(url)
- } else {
- w.Write([]byte(url))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.indent++
- }
- if err := w.writeMessage(m2); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.indent--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (w *textWriter) writeMessage(m protoreflect.Message) error {
- md := m.Descriptor()
- if w.expandAny && md.FullName() == "google.protobuf.Any" {
- if canExpand, err := w.writeProto3Any(m); canExpand {
- return err
- }
- }
-
- fds := md.Fields()
- for i := 0; i < fds.Len(); {
- fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- } else {
- i++
- }
- if fd == nil || !m.Has(fd) {
- continue
- }
-
- switch {
- case fd.IsList():
- lv := m.Get(fd).List()
- for j := 0; j < lv.Len(); j++ {
- w.writeName(fd)
- v := lv.Get(j)
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- case fd.IsMap():
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := m.Get(fd).Map()
-
- type entry struct{ key, val protoreflect.Value }
- var entries []entry
- mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- entries = append(entries, entry{k.Value(), v})
- return true
- })
- sort.Slice(entries, func(i, j int) bool {
- switch kfd.Kind() {
- case protoreflect.BoolKind:
- return !entries[i].key.Bool() && entries[j].key.Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return entries[i].key.Int() < entries[j].key.Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return entries[i].key.Uint() < entries[j].key.Uint()
- case protoreflect.StringKind:
- return entries[i].key.String() < entries[j].key.String()
- default:
- panic("invalid kind")
- }
- })
- for _, entry := range entries {
- w.writeName(fd)
- w.WriteByte('<')
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- w.writeName(kfd)
- if err := w.writeSingularValue(entry.key, kfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.writeName(vfd)
- if err := w.writeSingularValue(entry.val, vfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.indent--
- w.WriteByte('>')
- w.WriteByte('\n')
- }
- default:
- w.writeName(fd)
- if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- }
-
- if b := m.GetUnknown(); len(b) > 0 {
- w.writeUnknownFields(b)
- }
- return w.writeExtensions(m)
-}
-
-func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- switch fd.Kind() {
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- switch vf := v.Float(); {
- case math.IsInf(vf, +1):
- w.Write(posInf)
- case math.IsInf(vf, -1):
- w.Write(negInf)
- case math.IsNaN(vf):
- w.Write(nan)
- default:
- fmt.Fprint(w, v.Interface())
- }
- case protoreflect.StringKind:
- // NOTE: This does not validate UTF-8 for historical reasons.
- w.writeQuotedString(string(v.String()))
- case protoreflect.BytesKind:
- w.writeQuotedString(string(v.Bytes()))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var bra, ket byte = '<', '>'
- if fd.Kind() == protoreflect.GroupKind {
- bra, ket = '{', '}'
- }
- w.WriteByte(bra)
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- m := v.Message()
- if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
- b, err := m2.MarshalText()
- if err != nil {
- return err
- }
- w.Write(b)
- } else {
- w.writeMessage(m)
- }
- w.indent--
- w.WriteByte(ket)
- case protoreflect.EnumKind:
- if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
- fmt.Fprint(w, ev.Name())
- } else {
- fmt.Fprint(w, v.Enum())
- }
- default:
- fmt.Fprint(w, v.Interface())
- }
- return nil
-}
-
-// writeQuotedString writes a quoted string in the protocol buffer text format.
-func (w *textWriter) writeQuotedString(s string) {
- w.WriteByte('"')
- for i := 0; i < len(s); i++ {
- switch c := s[i]; c {
- case '\n':
- w.buf = append(w.buf, `\n`...)
- case '\r':
- w.buf = append(w.buf, `\r`...)
- case '\t':
- w.buf = append(w.buf, `\t`...)
- case '"':
- w.buf = append(w.buf, `\"`...)
- case '\\':
- w.buf = append(w.buf, `\\`...)
- default:
- if isPrint := c >= 0x20 && c < 0x7f; isPrint {
- w.buf = append(w.buf, c)
- } else {
- w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
- }
- }
- }
- w.WriteByte('"')
-}
-
-func (w *textWriter) writeUnknownFields(b []byte) {
- if !w.compact {
- fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
- }
-
- for len(b) > 0 {
- num, wtyp, n := protowire.ConsumeTag(b)
- if n < 0 {
- return
- }
- b = b[n:]
-
- if wtyp == protowire.EndGroupType {
- w.indent--
- w.Write(endBraceNewline)
- continue
- }
- fmt.Fprint(w, num)
- if wtyp != protowire.StartGroupType {
- w.WriteByte(':')
- }
- if !w.compact || wtyp == protowire.StartGroupType {
- w.WriteByte(' ')
- }
- switch wtyp {
- case protowire.VarintType:
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed64Type:
- v, n := protowire.ConsumeFixed64(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.BytesType:
- v, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprintf(w, "%q", v)
- case protowire.StartGroupType:
- w.WriteByte('{')
- w.indent++
- default:
- fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
- }
- w.WriteByte('\n')
- }
-}
-
-// writeExtensions writes all the extensions in m.
-func (w *textWriter) writeExtensions(m protoreflect.Message) error {
- md := m.Descriptor()
- if md.ExtensionRanges().Len() == 0 {
- return nil
- }
-
- type ext struct {
- desc protoreflect.FieldDescriptor
- val protoreflect.Value
- }
- var exts []ext
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- exts = append(exts, ext{fd, v})
- }
- return true
- })
- sort.Slice(exts, func(i, j int) bool {
- return exts[i].desc.Number() < exts[j].desc.Number()
- })
-
- for _, ext := range exts {
- // For message set, use the name of the message as the extension name.
- name := string(ext.desc.FullName())
- if isMessageSet(ext.desc.ContainingMessage()) {
- name = strings.TrimSuffix(name, ".message_set_extension")
- }
-
- if !ext.desc.IsList() {
- if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
- return err
- }
- } else {
- lv := ext.val.List()
- for i := 0; i < lv.Len(); i++ {
- if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- fmt.Fprintf(w, "[%s]:", name)
- if !w.compact {
- w.WriteByte(' ')
- }
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- for i := 0; i < w.indent*2; i++ {
- w.buf = append(w.buf, ' ')
- }
- w.complete = false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
deleted file mode 100644
index d7c28da5..00000000
--- a/vendor/github.com/golang/protobuf/proto/wire.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/runtime/protoiface"
-)
-
-// Size returns the size in bytes of the wire-format encoding of m.
-func Size(m Message) int {
- if m == nil {
- return 0
- }
- mi := MessageV2(m)
- return protoV2.Size(mi)
-}
-
-// Marshal returns the wire-format encoding of m.
-func Marshal(m Message) ([]byte, error) {
- b, err := marshalAppend(nil, m, false)
- if b == nil {
- b = zeroBytes
- }
- return b, err
-}
-
-var zeroBytes = make([]byte, 0, 0)
-
-func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
- if m == nil {
- return nil, ErrNil
- }
- mi := MessageV2(m)
- nbuf, err := protoV2.MarshalOptions{
- Deterministic: deterministic,
- AllowPartial: true,
- }.MarshalAppend(buf, mi)
- if err != nil {
- return buf, err
- }
- if len(buf) == len(nbuf) {
- if !mi.ProtoReflect().IsValid() {
- return buf, ErrNil
- }
- }
- return nbuf, checkRequiredNotSet(mi)
-}
-
-// Unmarshal parses a wire-format message in b and places the decoded results in m.
-//
-// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
-// removed. Use UnmarshalMerge to preserve and append to existing data.
-func Unmarshal(b []byte, m Message) error {
- m.Reset()
- return UnmarshalMerge(b, m)
-}
-
-// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
-func UnmarshalMerge(b []byte, m Message) error {
- mi := MessageV2(m)
- out, err := protoV2.UnmarshalOptions{
- AllowPartial: true,
- Merge: true,
- }.UnmarshalState(protoiface.UnmarshalInput{
- Buf: b,
- Message: mi.ProtoReflect(),
- })
- if err != nil {
- return err
- }
- if out.Flags&protoiface.UnmarshalInitialized > 0 {
- return nil
- }
- return checkRequiredNotSet(mi)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
deleted file mode 100644
index 398e3485..00000000
--- a/vendor/github.com/golang/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-// Bool stores v in a new bool value and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int stores v in a new int32 value and returns a pointer to it.
-//
-// Deprecated: Use Int32 instead.
-func Int(v int) *int32 { return Int32(int32(v)) }
-
-// Int32 stores v in a new int32 value and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 stores v in a new int64 value and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Uint32 stores v in a new uint32 value and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 stores v in a new uint64 value and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// Float32 stores v in a new float32 value and returns a pointer to it.
-func Float32(v float32) *float32 { return &v }
-
-// Float64 stores v in a new float64 value and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// String stores v in a new string value and returns a pointer to it.
-func String(v string) *string { return &v }
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
deleted file mode 100644
index a76f8076..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
-
-package timestamp
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/timestamp.proto.
-
-type Timestamp = timestamppb.Timestamp
-
-var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
- 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
- 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
-func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
- if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/go-github/v48/AUTHORS b/vendor/github.com/google/go-github/v48/AUTHORS
deleted file mode 100644
index 5e40cd1f..00000000
--- a/vendor/github.com/google/go-github/v48/AUTHORS
+++ /dev/null
@@ -1,433 +0,0 @@
-# This is the official list of go-github authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control or
-# https://github.com/google/go-github/graphs/contributors.
-#
-# Authors who wish to be recognized in this file should add themselves (or
-# their employer, as appropriate).
-
-178inaba
-2BFL
-413x
-Abed Kibbe
-Abhinav Gupta
-Abhishek Veeramalla
-aboy
-adrienzieba
-afdesk
-Ahmed Hagy
-Aidan Steele
-Ainsley Chong
-ajz01
-Akeda Bagus
-Akhil Mohan
-Alec Thomas
-Aleks Clark
-Alex Bramley
-Alex Orr
-Alex Su
-Alex Unger
-Alexander Harkness
-Alexis Gauthiez
-Ali Farooq
-Allan Guwatudde
-Allen Sun
-Amey Sakhadeo
-Anders Janmyr
-Andreas Garnæs
-Andrew Ryabchun
-Andrew Svoboda
-Andy Grunwald
-Andy Hume
-Andy Lindeman
-angie pinilla
-anjanashenoy
-Anshuman Bhartiya
-Antoine
-Antoine Pelisse
-Anton Nguyen
-Anubha Kushwaha
-appilon
-aprp
-Aravind
-Arda Kuyumcu
-Arıl Bozoluk
-Asier Marruedo
-Austin Burdine
-Austin Dizzy
-Azuka Okuleye
-Ben Batha
-Benjamen Keroack
-Beshr Kayali
-Beyang Liu
-Billy Keyes
-Billy Lynch
-Bjorn Neergaard
-Björn Häuser
-boljen
-Bracken
-Brad Harris
-Brad Moylan
-Bradley Falzon
-Bradley McAllister
-Brandon Butler
-Brandon Cook